From 24deedded8b3a00d9b253f1b3097c6adb8eb2309 Mon Sep 17 00:00:00 2001
From: Brandon Wu
Date: Mon, 7 Oct 2024 19:43:24 -0700
Subject: [PATCH 1/2] [clang][RISCV] Correct the SEW operand of
 indexed/fault-only-first segment intrinsics

Indexed segment load/store intrinsics don't have SEW information encoded in
their names, so we need to get the information from their pointer type
argument at runtime. (A short note after the test updates below sketches what
the corrected operand value encodes.)
---
 .../non-overloaded/bfloat16/vloxseg2ei16.c | 20 +-
 .../non-overloaded/bfloat16/vloxseg3ei16.c | 16 +-
 .../non-overloaded/bfloat16/vloxseg4ei16.c | 16 +-
 .../non-overloaded/bfloat16/vloxseg5ei16.c | 12 +-
 .../non-overloaded/bfloat16/vloxseg6ei16.c | 12 +-
 .../non-overloaded/bfloat16/vloxseg7ei16.c | 12 +-
 .../non-overloaded/bfloat16/vloxseg8ei16.c | 12 +-
 .../non-overloaded/bfloat16/vlseg2e16ff.c | 20 +-
 .../non-overloaded/bfloat16/vlseg3e16ff.c | 16 +-
 .../non-overloaded/bfloat16/vlseg4e16ff.c | 16 +-
 .../non-overloaded/bfloat16/vlseg5e16ff.c | 12 +-
 .../non-overloaded/bfloat16/vlseg6e16ff.c | 12 +-
 .../non-overloaded/bfloat16/vlseg7e16ff.c | 12 +-
 .../non-overloaded/bfloat16/vlseg8e16ff.c | 12 +-
 .../non-overloaded/bfloat16/vluxseg2ei16.c | 20 +-
 .../non-overloaded/bfloat16/vluxseg3ei16.c | 16 +-
 .../non-overloaded/bfloat16/vluxseg4ei16.c | 16 +-
 .../non-overloaded/bfloat16/vluxseg5ei16.c | 12 +-
 .../non-overloaded/bfloat16/vluxseg6ei16.c | 12 +-
 .../non-overloaded/bfloat16/vluxseg7ei16.c | 12 +-
 .../non-overloaded/bfloat16/vluxseg8ei16.c | 12 +-
 .../non-overloaded/bfloat16/vsoxseg2ei16.c | 20 +-
 .../non-overloaded/bfloat16/vsoxseg3ei16.c | 16 +-
 .../non-overloaded/bfloat16/vsoxseg4ei16.c | 16 +-
 .../non-overloaded/bfloat16/vsoxseg5ei16.c | 12 +-
 .../non-overloaded/bfloat16/vsoxseg6ei16.c | 12 +-
 .../non-overloaded/bfloat16/vsoxseg7ei16.c | 12 +-
 .../non-overloaded/bfloat16/vsoxseg8ei16.c | 12 +-
 .../non-overloaded/bfloat16/vsuxseg2ei16.c | 20 +-
 .../non-overloaded/bfloat16/vsuxseg3ei16.c | 16 +-
 .../non-overloaded/bfloat16/vsuxseg4ei16.c | 16 +-
 .../non-overloaded/bfloat16/vsuxseg5ei16.c | 12 +-
 .../non-overloaded/bfloat16/vsuxseg6ei16.c | 12 +-
 .../non-overloaded/bfloat16/vsuxseg7ei16.c | 12 +-
 .../non-overloaded/bfloat16/vsuxseg8ei16.c | 12 +-
 .../non-policy/non-overloaded/vloxseg2ei16.c | 192 ++++-----
 .../non-policy/non-overloaded/vloxseg2ei32.c | 184 ++++-----
 .../non-policy/non-overloaded/vloxseg2ei64.c | 164 ++++----
 .../non-policy/non-overloaded/vloxseg2ei8.c | 192 ++++-----
 .../non-policy/non-overloaded/vloxseg3ei16.c | 148 +++----
 .../non-policy/non-overloaded/vloxseg3ei32.c | 148 +++----
 .../non-policy/non-overloaded/vloxseg3ei64.c | 140 +++----
 .../non-policy/non-overloaded/vloxseg3ei8.c | 148 +++----
 .../non-policy/non-overloaded/vloxseg4ei16.c | 148 +++----
 .../non-policy/non-overloaded/vloxseg4ei32.c | 148 +++----
 .../non-policy/non-overloaded/vloxseg4ei64.c | 140 +++----
 .../non-policy/non-overloaded/vloxseg4ei8.c | 148 +++----
 .../non-policy/non-overloaded/vloxseg5ei16.c | 104 ++---
 .../non-policy/non-overloaded/vloxseg5ei32.c | 104 ++---
 .../non-policy/non-overloaded/vloxseg5ei64.c | 104 ++---
 .../non-policy/non-overloaded/vloxseg5ei8.c | 104 ++---
 .../non-policy/non-overloaded/vloxseg6ei16.c | 104 ++---
 .../non-policy/non-overloaded/vloxseg6ei32.c | 104 ++---
 .../non-policy/non-overloaded/vloxseg6ei64.c | 104 ++---
 .../non-policy/non-overloaded/vloxseg6ei8.c | 104 ++---
 .../non-policy/non-overloaded/vloxseg7ei16.c | 104 ++---
 .../non-policy/non-overloaded/vloxseg7ei32.c | 104 ++---
 .../non-policy/non-overloaded/vloxseg7ei64.c | 104 ++---
.../non-policy/non-overloaded/vloxseg7ei8.c | 104 ++--- .../non-policy/non-overloaded/vloxseg8ei16.c | 104 ++--- .../non-policy/non-overloaded/vloxseg8ei32.c | 104 ++--- .../non-policy/non-overloaded/vloxseg8ei64.c | 104 ++--- .../non-policy/non-overloaded/vloxseg8ei8.c | 104 ++--- .../non-policy/non-overloaded/vlseg2e16ff.c | 60 +-- .../non-policy/non-overloaded/vlseg2e32ff.c | 48 +-- .../non-policy/non-overloaded/vlseg2e64ff.c | 36 +- .../non-policy/non-overloaded/vlseg2e8ff.c | 48 +-- .../non-policy/non-overloaded/vlseg3e16ff.c | 48 +-- .../non-policy/non-overloaded/vlseg3e32ff.c | 36 +- .../non-policy/non-overloaded/vlseg3e64ff.c | 24 +- .../non-policy/non-overloaded/vlseg3e8ff.c | 40 +- .../non-policy/non-overloaded/vlseg4e16ff.c | 48 +-- .../non-policy/non-overloaded/vlseg4e32ff.c | 36 +- .../non-policy/non-overloaded/vlseg4e64ff.c | 24 +- .../non-policy/non-overloaded/vlseg4e8ff.c | 40 +- .../non-policy/non-overloaded/vlseg5e16ff.c | 36 +- .../non-policy/non-overloaded/vlseg5e32ff.c | 24 +- .../non-policy/non-overloaded/vlseg5e64ff.c | 12 +- .../non-policy/non-overloaded/vlseg5e8ff.c | 32 +- .../non-policy/non-overloaded/vlseg6e16ff.c | 36 +- .../non-policy/non-overloaded/vlseg6e32ff.c | 24 +- .../non-policy/non-overloaded/vlseg6e64ff.c | 12 +- .../non-policy/non-overloaded/vlseg6e8ff.c | 32 +- .../non-policy/non-overloaded/vlseg7e16ff.c | 36 +- .../non-policy/non-overloaded/vlseg7e32ff.c | 24 +- .../non-policy/non-overloaded/vlseg7e64ff.c | 12 +- .../non-policy/non-overloaded/vlseg7e8ff.c | 32 +- .../non-policy/non-overloaded/vlseg8e16ff.c | 36 +- .../non-policy/non-overloaded/vlseg8e32ff.c | 24 +- .../non-policy/non-overloaded/vlseg8e64ff.c | 12 +- .../non-policy/non-overloaded/vlseg8e8ff.c | 32 +- .../non-policy/non-overloaded/vluxseg2ei16.c | 192 ++++----- .../non-policy/non-overloaded/vluxseg2ei32.c | 184 ++++----- .../non-policy/non-overloaded/vluxseg2ei64.c | 164 ++++---- .../non-policy/non-overloaded/vluxseg2ei8.c | 192 ++++----- .../non-policy/non-overloaded/vluxseg3ei16.c | 148 +++---- .../non-policy/non-overloaded/vluxseg3ei32.c | 148 +++---- .../non-policy/non-overloaded/vluxseg3ei64.c | 140 +++---- .../non-policy/non-overloaded/vluxseg3ei8.c | 148 +++---- .../non-policy/non-overloaded/vluxseg4ei16.c | 148 +++---- .../non-policy/non-overloaded/vluxseg4ei32.c | 148 +++---- .../non-policy/non-overloaded/vluxseg4ei64.c | 140 +++---- .../non-policy/non-overloaded/vluxseg4ei8.c | 148 +++---- .../non-policy/non-overloaded/vluxseg5ei16.c | 104 ++--- .../non-policy/non-overloaded/vluxseg5ei32.c | 104 ++--- .../non-policy/non-overloaded/vluxseg5ei64.c | 104 ++--- .../non-policy/non-overloaded/vluxseg5ei8.c | 104 ++--- .../non-policy/non-overloaded/vluxseg6ei16.c | 104 ++--- .../non-policy/non-overloaded/vluxseg6ei32.c | 104 ++--- .../non-policy/non-overloaded/vluxseg6ei64.c | 104 ++--- .../non-policy/non-overloaded/vluxseg6ei8.c | 104 ++--- .../non-policy/non-overloaded/vluxseg7ei16.c | 104 ++--- .../non-policy/non-overloaded/vluxseg7ei32.c | 104 ++--- .../non-policy/non-overloaded/vluxseg7ei64.c | 104 ++--- .../non-policy/non-overloaded/vluxseg7ei8.c | 104 ++--- .../non-policy/non-overloaded/vluxseg8ei16.c | 104 ++--- .../non-policy/non-overloaded/vluxseg8ei32.c | 104 ++--- .../non-policy/non-overloaded/vluxseg8ei64.c | 104 ++--- .../non-policy/non-overloaded/vluxseg8ei8.c | 104 ++--- .../non-policy/non-overloaded/vsoxseg2ei16.c | 192 ++++----- .../non-policy/non-overloaded/vsoxseg2ei32.c | 184 ++++----- .../non-policy/non-overloaded/vsoxseg2ei64.c | 164 ++++---- 
.../non-policy/non-overloaded/vsoxseg2ei8.c | 192 ++++----- .../non-policy/non-overloaded/vsoxseg3ei16.c | 148 +++---- .../non-policy/non-overloaded/vsoxseg3ei32.c | 148 +++---- .../non-policy/non-overloaded/vsoxseg3ei64.c | 140 +++---- .../non-policy/non-overloaded/vsoxseg3ei8.c | 148 +++---- .../non-policy/non-overloaded/vsoxseg4ei16.c | 148 +++---- .../non-policy/non-overloaded/vsoxseg4ei32.c | 148 +++---- .../non-policy/non-overloaded/vsoxseg4ei64.c | 140 +++---- .../non-policy/non-overloaded/vsoxseg4ei8.c | 148 +++---- .../non-policy/non-overloaded/vsoxseg5ei16.c | 104 ++--- .../non-policy/non-overloaded/vsoxseg5ei32.c | 104 ++--- .../non-policy/non-overloaded/vsoxseg5ei64.c | 104 ++--- .../non-policy/non-overloaded/vsoxseg5ei8.c | 104 ++--- .../non-policy/non-overloaded/vsoxseg6ei16.c | 104 ++--- .../non-policy/non-overloaded/vsoxseg6ei32.c | 104 ++--- .../non-policy/non-overloaded/vsoxseg6ei64.c | 104 ++--- .../non-policy/non-overloaded/vsoxseg6ei8.c | 104 ++--- .../non-policy/non-overloaded/vsoxseg7ei16.c | 104 ++--- .../non-policy/non-overloaded/vsoxseg7ei32.c | 104 ++--- .../non-policy/non-overloaded/vsoxseg7ei64.c | 104 ++--- .../non-policy/non-overloaded/vsoxseg7ei8.c | 104 ++--- .../non-policy/non-overloaded/vsoxseg8ei16.c | 104 ++--- .../non-policy/non-overloaded/vsoxseg8ei32.c | 104 ++--- .../non-policy/non-overloaded/vsoxseg8ei64.c | 104 ++--- .../non-policy/non-overloaded/vsoxseg8ei8.c | 104 ++--- .../non-policy/non-overloaded/vsuxseg2ei16.c | 192 ++++----- .../non-policy/non-overloaded/vsuxseg2ei32.c | 184 ++++----- .../non-policy/non-overloaded/vsuxseg2ei64.c | 164 ++++---- .../non-policy/non-overloaded/vsuxseg2ei8.c | 192 ++++----- .../non-policy/non-overloaded/vsuxseg3ei16.c | 148 +++---- .../non-policy/non-overloaded/vsuxseg3ei32.c | 148 +++---- .../non-policy/non-overloaded/vsuxseg3ei64.c | 140 +++---- .../non-policy/non-overloaded/vsuxseg3ei8.c | 148 +++---- .../non-policy/non-overloaded/vsuxseg4ei16.c | 148 +++---- .../non-policy/non-overloaded/vsuxseg4ei32.c | 148 +++---- .../non-policy/non-overloaded/vsuxseg4ei64.c | 140 +++---- .../non-policy/non-overloaded/vsuxseg4ei8.c | 148 +++---- .../non-policy/non-overloaded/vsuxseg5ei16.c | 104 ++--- .../non-policy/non-overloaded/vsuxseg5ei32.c | 104 ++--- .../non-policy/non-overloaded/vsuxseg5ei64.c | 104 ++--- .../non-policy/non-overloaded/vsuxseg5ei8.c | 104 ++--- .../non-policy/non-overloaded/vsuxseg6ei16.c | 104 ++--- .../non-policy/non-overloaded/vsuxseg6ei32.c | 104 ++--- .../non-policy/non-overloaded/vsuxseg6ei64.c | 104 ++--- .../non-policy/non-overloaded/vsuxseg6ei8.c | 104 ++--- .../non-policy/non-overloaded/vsuxseg7ei16.c | 104 ++--- .../non-policy/non-overloaded/vsuxseg7ei32.c | 104 ++--- .../non-policy/non-overloaded/vsuxseg7ei64.c | 104 ++--- .../non-policy/non-overloaded/vsuxseg7ei8.c | 104 ++--- .../non-policy/non-overloaded/vsuxseg8ei16.c | 104 ++--- .../non-policy/non-overloaded/vsuxseg8ei32.c | 104 ++--- .../non-policy/non-overloaded/vsuxseg8ei64.c | 104 ++--- .../non-policy/non-overloaded/vsuxseg8ei8.c | 104 ++--- .../overloaded/bfloat16/vloxseg2ei16.c | 20 +- .../overloaded/bfloat16/vloxseg3ei16.c | 16 +- .../overloaded/bfloat16/vloxseg4ei16.c | 16 +- .../overloaded/bfloat16/vloxseg5ei16.c | 12 +- .../overloaded/bfloat16/vloxseg6ei16.c | 12 +- .../overloaded/bfloat16/vloxseg7ei16.c | 12 +- .../overloaded/bfloat16/vloxseg8ei16.c | 12 +- .../overloaded/bfloat16/vlseg2e16ff.c | 10 +- .../overloaded/bfloat16/vlseg3e16ff.c | 8 +- .../overloaded/bfloat16/vlseg4e16ff.c | 8 +- 
.../overloaded/bfloat16/vlseg5e16ff.c | 6 +- .../overloaded/bfloat16/vlseg6e16ff.c | 6 +- .../overloaded/bfloat16/vlseg7e16ff.c | 6 +- .../overloaded/bfloat16/vlseg8e16ff.c | 6 +- .../overloaded/bfloat16/vluxseg2ei16.c | 20 +- .../overloaded/bfloat16/vluxseg3ei16.c | 16 +- .../overloaded/bfloat16/vluxseg4ei16.c | 16 +- .../overloaded/bfloat16/vluxseg5ei16.c | 12 +- .../overloaded/bfloat16/vluxseg6ei16.c | 12 +- .../overloaded/bfloat16/vluxseg7ei16.c | 12 +- .../overloaded/bfloat16/vluxseg8ei16.c | 12 +- .../overloaded/bfloat16/vsoxseg2ei16.c | 20 +- .../overloaded/bfloat16/vsoxseg3ei16.c | 16 +- .../overloaded/bfloat16/vsoxseg4ei16.c | 16 +- .../overloaded/bfloat16/vsoxseg5ei16.c | 12 +- .../overloaded/bfloat16/vsoxseg6ei16.c | 12 +- .../overloaded/bfloat16/vsoxseg7ei16.c | 12 +- .../overloaded/bfloat16/vsoxseg8ei16.c | 12 +- .../overloaded/bfloat16/vsuxseg2ei16.c | 20 +- .../overloaded/bfloat16/vsuxseg3ei16.c | 16 +- .../overloaded/bfloat16/vsuxseg4ei16.c | 16 +- .../overloaded/bfloat16/vsuxseg5ei16.c | 12 +- .../overloaded/bfloat16/vsuxseg6ei16.c | 12 +- .../overloaded/bfloat16/vsuxseg7ei16.c | 12 +- .../overloaded/bfloat16/vsuxseg8ei16.c | 12 +- .../non-policy/overloaded/vloxseg2ei16.c | 192 ++++----- .../non-policy/overloaded/vloxseg2ei32.c | 184 ++++----- .../non-policy/overloaded/vloxseg2ei64.c | 164 ++++---- .../non-policy/overloaded/vloxseg2ei8.c | 192 ++++----- .../non-policy/overloaded/vloxseg3ei16.c | 148 +++---- .../non-policy/overloaded/vloxseg3ei32.c | 148 +++---- .../non-policy/overloaded/vloxseg3ei64.c | 140 +++---- .../non-policy/overloaded/vloxseg3ei8.c | 148 +++---- .../non-policy/overloaded/vloxseg4ei16.c | 148 +++---- .../non-policy/overloaded/vloxseg4ei32.c | 148 +++---- .../non-policy/overloaded/vloxseg4ei64.c | 140 +++---- .../non-policy/overloaded/vloxseg4ei8.c | 148 +++---- .../non-policy/overloaded/vloxseg5ei16.c | 104 ++--- .../non-policy/overloaded/vloxseg5ei32.c | 104 ++--- .../non-policy/overloaded/vloxseg5ei64.c | 104 ++--- .../non-policy/overloaded/vloxseg5ei8.c | 104 ++--- .../non-policy/overloaded/vloxseg6ei16.c | 104 ++--- .../non-policy/overloaded/vloxseg6ei32.c | 104 ++--- .../non-policy/overloaded/vloxseg6ei64.c | 104 ++--- .../non-policy/overloaded/vloxseg6ei8.c | 104 ++--- .../non-policy/overloaded/vloxseg7ei16.c | 104 ++--- .../non-policy/overloaded/vloxseg7ei32.c | 104 ++--- .../non-policy/overloaded/vloxseg7ei64.c | 104 ++--- .../non-policy/overloaded/vloxseg7ei8.c | 104 ++--- .../non-policy/overloaded/vloxseg8ei16.c | 104 ++--- .../non-policy/overloaded/vloxseg8ei32.c | 104 ++--- .../non-policy/overloaded/vloxseg8ei64.c | 104 ++--- .../non-policy/overloaded/vloxseg8ei8.c | 104 ++--- .../non-policy/overloaded/vlseg2e16ff.c | 30 +- .../non-policy/overloaded/vlseg2e32ff.c | 24 +- .../non-policy/overloaded/vlseg2e64ff.c | 18 +- .../non-policy/overloaded/vlseg2e8ff.c | 24 +- .../non-policy/overloaded/vlseg3e16ff.c | 24 +- .../non-policy/overloaded/vlseg3e32ff.c | 18 +- .../non-policy/overloaded/vlseg3e64ff.c | 12 +- .../non-policy/overloaded/vlseg3e8ff.c | 20 +- .../non-policy/overloaded/vlseg4e16ff.c | 24 +- .../non-policy/overloaded/vlseg4e32ff.c | 18 +- .../non-policy/overloaded/vlseg4e64ff.c | 12 +- .../non-policy/overloaded/vlseg4e8ff.c | 20 +- .../non-policy/overloaded/vlseg5e16ff.c | 18 +- .../non-policy/overloaded/vlseg5e32ff.c | 12 +- .../non-policy/overloaded/vlseg5e64ff.c | 6 +- .../non-policy/overloaded/vlseg5e8ff.c | 16 +- .../non-policy/overloaded/vlseg6e16ff.c | 18 +- .../non-policy/overloaded/vlseg6e32ff.c | 12 +- 
.../non-policy/overloaded/vlseg6e64ff.c | 6 +- .../non-policy/overloaded/vlseg6e8ff.c | 16 +- .../non-policy/overloaded/vlseg7e16ff.c | 18 +- .../non-policy/overloaded/vlseg7e32ff.c | 12 +- .../non-policy/overloaded/vlseg7e64ff.c | 6 +- .../non-policy/overloaded/vlseg7e8ff.c | 16 +- .../non-policy/overloaded/vlseg8e16ff.c | 18 +- .../non-policy/overloaded/vlseg8e32ff.c | 12 +- .../non-policy/overloaded/vlseg8e64ff.c | 6 +- .../non-policy/overloaded/vlseg8e8ff.c | 16 +- .../non-policy/overloaded/vluxseg2ei16.c | 192 ++++----- .../non-policy/overloaded/vluxseg2ei32.c | 184 ++++----- .../non-policy/overloaded/vluxseg2ei64.c | 164 ++++---- .../non-policy/overloaded/vluxseg2ei8.c | 192 ++++----- .../non-policy/overloaded/vluxseg3ei16.c | 148 +++---- .../non-policy/overloaded/vluxseg3ei32.c | 148 +++---- .../non-policy/overloaded/vluxseg3ei64.c | 140 +++---- .../non-policy/overloaded/vluxseg3ei8.c | 148 +++---- .../non-policy/overloaded/vluxseg4ei16.c | 148 +++---- .../non-policy/overloaded/vluxseg4ei32.c | 148 +++---- .../non-policy/overloaded/vluxseg4ei64.c | 140 +++---- .../non-policy/overloaded/vluxseg4ei8.c | 148 +++---- .../non-policy/overloaded/vluxseg5ei16.c | 104 ++--- .../non-policy/overloaded/vluxseg5ei32.c | 104 ++--- .../non-policy/overloaded/vluxseg5ei64.c | 104 ++--- .../non-policy/overloaded/vluxseg5ei8.c | 104 ++--- .../non-policy/overloaded/vluxseg6ei16.c | 104 ++--- .../non-policy/overloaded/vluxseg6ei32.c | 104 ++--- .../non-policy/overloaded/vluxseg6ei64.c | 104 ++--- .../non-policy/overloaded/vluxseg6ei8.c | 104 ++--- .../non-policy/overloaded/vluxseg7ei16.c | 104 ++--- .../non-policy/overloaded/vluxseg7ei32.c | 104 ++--- .../non-policy/overloaded/vluxseg7ei64.c | 104 ++--- .../non-policy/overloaded/vluxseg7ei8.c | 104 ++--- .../non-policy/overloaded/vluxseg8ei16.c | 104 ++--- .../non-policy/overloaded/vluxseg8ei32.c | 104 ++--- .../non-policy/overloaded/vluxseg8ei64.c | 104 ++--- .../non-policy/overloaded/vluxseg8ei8.c | 104 ++--- .../non-policy/overloaded/vsoxseg2ei16.c | 192 ++++----- .../non-policy/overloaded/vsoxseg2ei32.c | 184 ++++----- .../non-policy/overloaded/vsoxseg2ei64.c | 164 ++++---- .../non-policy/overloaded/vsoxseg2ei8.c | 192 ++++----- .../non-policy/overloaded/vsoxseg3ei16.c | 148 +++---- .../non-policy/overloaded/vsoxseg3ei32.c | 148 +++---- .../non-policy/overloaded/vsoxseg3ei64.c | 140 +++---- .../non-policy/overloaded/vsoxseg3ei8.c | 148 +++---- .../non-policy/overloaded/vsoxseg4ei16.c | 148 +++---- .../non-policy/overloaded/vsoxseg4ei32.c | 148 +++---- .../non-policy/overloaded/vsoxseg4ei64.c | 140 +++---- .../non-policy/overloaded/vsoxseg4ei8.c | 148 +++---- .../non-policy/overloaded/vsoxseg5ei16.c | 104 ++--- .../non-policy/overloaded/vsoxseg5ei32.c | 104 ++--- .../non-policy/overloaded/vsoxseg5ei64.c | 104 ++--- .../non-policy/overloaded/vsoxseg5ei8.c | 104 ++--- .../non-policy/overloaded/vsoxseg6ei16.c | 104 ++--- .../non-policy/overloaded/vsoxseg6ei32.c | 104 ++--- .../non-policy/overloaded/vsoxseg6ei64.c | 104 ++--- .../non-policy/overloaded/vsoxseg6ei8.c | 104 ++--- .../non-policy/overloaded/vsoxseg7ei16.c | 104 ++--- .../non-policy/overloaded/vsoxseg7ei32.c | 104 ++--- .../non-policy/overloaded/vsoxseg7ei64.c | 104 ++--- .../non-policy/overloaded/vsoxseg7ei8.c | 104 ++--- .../non-policy/overloaded/vsoxseg8ei16.c | 104 ++--- .../non-policy/overloaded/vsoxseg8ei32.c | 104 ++--- .../non-policy/overloaded/vsoxseg8ei64.c | 104 ++--- .../non-policy/overloaded/vsoxseg8ei8.c | 104 ++--- .../non-policy/overloaded/vsuxseg2ei16.c | 192 ++++----- 
.../non-policy/overloaded/vsuxseg2ei32.c | 184 ++++----- .../non-policy/overloaded/vsuxseg2ei64.c | 164 ++++---- .../non-policy/overloaded/vsuxseg2ei8.c | 192 ++++----- .../non-policy/overloaded/vsuxseg3ei16.c | 148 +++---- .../non-policy/overloaded/vsuxseg3ei32.c | 148 +++---- .../non-policy/overloaded/vsuxseg3ei64.c | 140 +++---- .../non-policy/overloaded/vsuxseg3ei8.c | 148 +++---- .../non-policy/overloaded/vsuxseg4ei16.c | 148 +++---- .../non-policy/overloaded/vsuxseg4ei32.c | 148 +++---- .../non-policy/overloaded/vsuxseg4ei64.c | 140 +++---- .../non-policy/overloaded/vsuxseg4ei8.c | 148 +++---- .../non-policy/overloaded/vsuxseg5ei16.c | 104 ++--- .../non-policy/overloaded/vsuxseg5ei32.c | 104 ++--- .../non-policy/overloaded/vsuxseg5ei64.c | 104 ++--- .../non-policy/overloaded/vsuxseg5ei8.c | 104 ++--- .../non-policy/overloaded/vsuxseg6ei16.c | 104 ++--- .../non-policy/overloaded/vsuxseg6ei32.c | 104 ++--- .../non-policy/overloaded/vsuxseg6ei64.c | 104 ++--- .../non-policy/overloaded/vsuxseg6ei8.c | 104 ++--- .../non-policy/overloaded/vsuxseg7ei16.c | 104 ++--- .../non-policy/overloaded/vsuxseg7ei32.c | 104 ++--- .../non-policy/overloaded/vsuxseg7ei64.c | 104 ++--- .../non-policy/overloaded/vsuxseg7ei8.c | 104 ++--- .../non-policy/overloaded/vsuxseg8ei16.c | 104 ++--- .../non-policy/overloaded/vsuxseg8ei32.c | 104 ++--- .../non-policy/overloaded/vsuxseg8ei64.c | 104 ++--- .../non-policy/overloaded/vsuxseg8ei8.c | 104 ++--- .../non-overloaded/bfloat16/vloxseg2ei16.c | 40 +- .../non-overloaded/bfloat16/vloxseg3ei16.c | 32 +- .../non-overloaded/bfloat16/vloxseg4ei16.c | 32 +- .../non-overloaded/bfloat16/vloxseg5ei16.c | 24 +- .../non-overloaded/bfloat16/vloxseg6ei16.c | 24 +- .../non-overloaded/bfloat16/vloxseg7ei16.c | 24 +- .../non-overloaded/bfloat16/vloxseg8ei16.c | 24 +- .../non-overloaded/bfloat16/vlseg2e16ff.c | 40 +- .../non-overloaded/bfloat16/vlseg3e16ff.c | 32 +- .../non-overloaded/bfloat16/vlseg4e16ff.c | 32 +- .../non-overloaded/bfloat16/vlseg5e16ff.c | 24 +- .../non-overloaded/bfloat16/vlseg6e16ff.c | 24 +- .../non-overloaded/bfloat16/vlseg7e16ff.c | 24 +- .../non-overloaded/bfloat16/vlseg8e16ff.c | 24 +- .../non-overloaded/bfloat16/vluxseg2ei16.c | 40 +- .../non-overloaded/bfloat16/vluxseg3ei16.c | 32 +- .../non-overloaded/bfloat16/vluxseg4ei16.c | 32 +- .../non-overloaded/bfloat16/vluxseg5ei16.c | 24 +- .../non-overloaded/bfloat16/vluxseg6ei16.c | 24 +- .../non-overloaded/bfloat16/vluxseg7ei16.c | 24 +- .../non-overloaded/bfloat16/vluxseg8ei16.c | 24 +- .../policy/non-overloaded/vloxseg2ei16.c | 384 +++++++++--------- .../policy/non-overloaded/vloxseg2ei32.c | 368 ++++++++--------- .../policy/non-overloaded/vloxseg2ei64.c | 328 +++++++-------- .../policy/non-overloaded/vloxseg2ei8.c | 384 +++++++++--------- .../policy/non-overloaded/vloxseg3ei16.c | 296 +++++++------- .../policy/non-overloaded/vloxseg3ei32.c | 296 +++++++------- .../policy/non-overloaded/vloxseg3ei64.c | 280 ++++++------- .../policy/non-overloaded/vloxseg3ei8.c | 296 +++++++------- .../policy/non-overloaded/vloxseg4ei16.c | 296 +++++++------- .../policy/non-overloaded/vloxseg4ei32.c | 296 +++++++------- .../policy/non-overloaded/vloxseg4ei64.c | 280 ++++++------- .../policy/non-overloaded/vloxseg4ei8.c | 296 +++++++------- .../policy/non-overloaded/vloxseg5ei16.c | 208 +++++----- .../policy/non-overloaded/vloxseg5ei32.c | 208 +++++----- .../policy/non-overloaded/vloxseg5ei64.c | 208 +++++----- .../policy/non-overloaded/vloxseg5ei8.c | 208 +++++----- .../policy/non-overloaded/vloxseg6ei16.c | 208 +++++----- 
.../policy/non-overloaded/vloxseg6ei32.c | 208 +++++----- .../policy/non-overloaded/vloxseg6ei64.c | 208 +++++----- .../policy/non-overloaded/vloxseg6ei8.c | 208 +++++----- .../policy/non-overloaded/vloxseg7ei16.c | 208 +++++----- .../policy/non-overloaded/vloxseg7ei32.c | 208 +++++----- .../policy/non-overloaded/vloxseg7ei64.c | 208 +++++----- .../policy/non-overloaded/vloxseg7ei8.c | 208 +++++----- .../policy/non-overloaded/vloxseg8ei16.c | 208 +++++----- .../policy/non-overloaded/vloxseg8ei32.c | 208 +++++----- .../policy/non-overloaded/vloxseg8ei64.c | 208 +++++----- .../policy/non-overloaded/vloxseg8ei8.c | 208 +++++----- .../policy/non-overloaded/vlseg2e16ff.c | 120 +++--- .../policy/non-overloaded/vlseg2e32ff.c | 96 ++--- .../policy/non-overloaded/vlseg2e64ff.c | 72 ++-- .../policy/non-overloaded/vlseg2e8ff.c | 96 ++--- .../policy/non-overloaded/vlseg3e16ff.c | 96 ++--- .../policy/non-overloaded/vlseg3e32ff.c | 72 ++-- .../policy/non-overloaded/vlseg3e64ff.c | 48 +-- .../policy/non-overloaded/vlseg3e8ff.c | 80 ++-- .../policy/non-overloaded/vlseg4e16ff.c | 96 ++--- .../policy/non-overloaded/vlseg4e32ff.c | 72 ++-- .../policy/non-overloaded/vlseg4e64ff.c | 48 +-- .../policy/non-overloaded/vlseg4e8ff.c | 80 ++-- .../policy/non-overloaded/vlseg5e16ff.c | 72 ++-- .../policy/non-overloaded/vlseg5e32ff.c | 48 +-- .../policy/non-overloaded/vlseg5e64ff.c | 24 +- .../policy/non-overloaded/vlseg5e8ff.c | 64 +-- .../policy/non-overloaded/vlseg6e16ff.c | 72 ++-- .../policy/non-overloaded/vlseg6e32ff.c | 48 +-- .../policy/non-overloaded/vlseg6e64ff.c | 24 +- .../policy/non-overloaded/vlseg6e8ff.c | 64 +-- .../policy/non-overloaded/vlseg7e16ff.c | 72 ++-- .../policy/non-overloaded/vlseg7e32ff.c | 48 +-- .../policy/non-overloaded/vlseg7e64ff.c | 24 +- .../policy/non-overloaded/vlseg7e8ff.c | 64 +-- .../policy/non-overloaded/vlseg8e16ff.c | 72 ++-- .../policy/non-overloaded/vlseg8e32ff.c | 48 +-- .../policy/non-overloaded/vlseg8e64ff.c | 24 +- .../policy/non-overloaded/vlseg8e8ff.c | 64 +-- .../policy/non-overloaded/vluxseg2ei16.c | 384 +++++++++--------- .../policy/non-overloaded/vluxseg2ei32.c | 368 ++++++++--------- .../policy/non-overloaded/vluxseg2ei64.c | 328 +++++++-------- .../policy/non-overloaded/vluxseg2ei8.c | 384 +++++++++--------- .../policy/non-overloaded/vluxseg3ei16.c | 296 +++++++------- .../policy/non-overloaded/vluxseg3ei32.c | 296 +++++++------- .../policy/non-overloaded/vluxseg3ei64.c | 280 ++++++------- .../policy/non-overloaded/vluxseg3ei8.c | 296 +++++++------- .../policy/non-overloaded/vluxseg4ei16.c | 296 +++++++------- .../policy/non-overloaded/vluxseg4ei32.c | 296 +++++++------- .../policy/non-overloaded/vluxseg4ei64.c | 280 ++++++------- .../policy/non-overloaded/vluxseg4ei8.c | 296 +++++++------- .../policy/non-overloaded/vluxseg5ei16.c | 208 +++++----- .../policy/non-overloaded/vluxseg5ei32.c | 208 +++++----- .../policy/non-overloaded/vluxseg5ei64.c | 208 +++++----- .../policy/non-overloaded/vluxseg5ei8.c | 208 +++++----- .../policy/non-overloaded/vluxseg6ei16.c | 208 +++++----- .../policy/non-overloaded/vluxseg6ei32.c | 208 +++++----- .../policy/non-overloaded/vluxseg6ei64.c | 208 +++++----- .../policy/non-overloaded/vluxseg6ei8.c | 208 +++++----- .../policy/non-overloaded/vluxseg7ei16.c | 208 +++++----- .../policy/non-overloaded/vluxseg7ei32.c | 208 +++++----- .../policy/non-overloaded/vluxseg7ei64.c | 208 +++++----- .../policy/non-overloaded/vluxseg7ei8.c | 208 +++++----- .../policy/non-overloaded/vluxseg8ei16.c | 208 +++++----- 
.../policy/non-overloaded/vluxseg8ei32.c | 208 +++++----- .../policy/non-overloaded/vluxseg8ei64.c | 208 +++++----- .../policy/non-overloaded/vluxseg8ei8.c | 208 +++++----- .../policy/overloaded/bfloat16/vloxseg2ei16.c | 40 +- .../policy/overloaded/bfloat16/vloxseg3ei16.c | 32 +- .../policy/overloaded/bfloat16/vloxseg4ei16.c | 32 +- .../policy/overloaded/bfloat16/vloxseg5ei16.c | 24 +- .../policy/overloaded/bfloat16/vloxseg6ei16.c | 24 +- .../policy/overloaded/bfloat16/vloxseg7ei16.c | 24 +- .../policy/overloaded/bfloat16/vloxseg8ei16.c | 24 +- .../policy/overloaded/bfloat16/vlseg2e16ff.c | 40 +- .../policy/overloaded/bfloat16/vlseg3e16ff.c | 32 +- .../policy/overloaded/bfloat16/vlseg4e16ff.c | 32 +- .../policy/overloaded/bfloat16/vlseg5e16ff.c | 24 +- .../policy/overloaded/bfloat16/vlseg6e16ff.c | 24 +- .../policy/overloaded/bfloat16/vlseg7e16ff.c | 24 +- .../policy/overloaded/bfloat16/vlseg8e16ff.c | 24 +- .../policy/overloaded/bfloat16/vluxseg2ei16.c | 40 +- .../policy/overloaded/bfloat16/vluxseg3ei16.c | 32 +- .../policy/overloaded/bfloat16/vluxseg4ei16.c | 32 +- .../policy/overloaded/bfloat16/vluxseg5ei16.c | 24 +- .../policy/overloaded/bfloat16/vluxseg6ei16.c | 24 +- .../policy/overloaded/bfloat16/vluxseg7ei16.c | 24 +- .../policy/overloaded/bfloat16/vluxseg8ei16.c | 24 +- .../policy/overloaded/vloxseg2ei16.c | 384 +++++++++--------- .../policy/overloaded/vloxseg2ei32.c | 368 ++++++++--------- .../policy/overloaded/vloxseg2ei64.c | 328 +++++++-------- .../policy/overloaded/vloxseg2ei8.c | 384 +++++++++--------- .../policy/overloaded/vloxseg3ei16.c | 296 +++++++------- .../policy/overloaded/vloxseg3ei32.c | 296 +++++++------- .../policy/overloaded/vloxseg3ei64.c | 280 ++++++------- .../policy/overloaded/vloxseg3ei8.c | 296 +++++++------- .../policy/overloaded/vloxseg4ei16.c | 296 +++++++------- .../policy/overloaded/vloxseg4ei32.c | 296 +++++++------- .../policy/overloaded/vloxseg4ei64.c | 280 ++++++------- .../policy/overloaded/vloxseg4ei8.c | 296 +++++++------- .../policy/overloaded/vloxseg5ei16.c | 208 +++++----- .../policy/overloaded/vloxseg5ei32.c | 208 +++++----- .../policy/overloaded/vloxseg5ei64.c | 208 +++++----- .../policy/overloaded/vloxseg5ei8.c | 208 +++++----- .../policy/overloaded/vloxseg6ei16.c | 208 +++++----- .../policy/overloaded/vloxseg6ei32.c | 208 +++++----- .../policy/overloaded/vloxseg6ei64.c | 208 +++++----- .../policy/overloaded/vloxseg6ei8.c | 208 +++++----- .../policy/overloaded/vloxseg7ei16.c | 208 +++++----- .../policy/overloaded/vloxseg7ei32.c | 208 +++++----- .../policy/overloaded/vloxseg7ei64.c | 208 +++++----- .../policy/overloaded/vloxseg7ei8.c | 208 +++++----- .../policy/overloaded/vloxseg8ei16.c | 208 +++++----- .../policy/overloaded/vloxseg8ei32.c | 208 +++++----- .../policy/overloaded/vloxseg8ei64.c | 208 +++++----- .../policy/overloaded/vloxseg8ei8.c | 208 +++++----- .../policy/overloaded/vlseg2e16ff.c | 120 +++--- .../policy/overloaded/vlseg2e32ff.c | 96 ++--- .../policy/overloaded/vlseg2e64ff.c | 72 ++-- .../policy/overloaded/vlseg2e8ff.c | 96 ++--- .../policy/overloaded/vlseg3e16ff.c | 96 ++--- .../policy/overloaded/vlseg3e32ff.c | 72 ++-- .../policy/overloaded/vlseg3e64ff.c | 48 +-- .../policy/overloaded/vlseg3e8ff.c | 80 ++-- .../policy/overloaded/vlseg4e16ff.c | 96 ++--- .../policy/overloaded/vlseg4e32ff.c | 72 ++-- .../policy/overloaded/vlseg4e64ff.c | 48 +-- .../policy/overloaded/vlseg4e8ff.c | 80 ++-- .../policy/overloaded/vlseg5e16ff.c | 72 ++-- .../policy/overloaded/vlseg5e32ff.c | 48 +-- .../policy/overloaded/vlseg5e64ff.c | 24 +- 
.../policy/overloaded/vlseg5e8ff.c | 64 +-- .../policy/overloaded/vlseg6e16ff.c | 72 ++-- .../policy/overloaded/vlseg6e32ff.c | 48 +-- .../policy/overloaded/vlseg6e64ff.c | 24 +- .../policy/overloaded/vlseg6e8ff.c | 64 +-- .../policy/overloaded/vlseg7e16ff.c | 72 ++-- .../policy/overloaded/vlseg7e32ff.c | 48 +-- .../policy/overloaded/vlseg7e64ff.c | 24 +- .../policy/overloaded/vlseg7e8ff.c | 64 +-- .../policy/overloaded/vlseg8e16ff.c | 72 ++-- .../policy/overloaded/vlseg8e32ff.c | 48 +-- .../policy/overloaded/vlseg8e64ff.c | 24 +- .../policy/overloaded/vlseg8e8ff.c | 64 +-- .../policy/overloaded/vluxseg2ei16.c | 384 +++++++++--------- .../policy/overloaded/vluxseg2ei32.c | 368 ++++++++--------- .../policy/overloaded/vluxseg2ei64.c | 328 +++++++-------- .../policy/overloaded/vluxseg2ei8.c | 384 +++++++++--------- .../policy/overloaded/vluxseg3ei16.c | 296 +++++++------- .../policy/overloaded/vluxseg3ei32.c | 296 +++++++------- .../policy/overloaded/vluxseg3ei64.c | 280 ++++++------- .../policy/overloaded/vluxseg3ei8.c | 296 +++++++------- .../policy/overloaded/vluxseg4ei16.c | 296 +++++++------- .../policy/overloaded/vluxseg4ei32.c | 296 +++++++------- .../policy/overloaded/vluxseg4ei64.c | 280 ++++++------- .../policy/overloaded/vluxseg4ei8.c | 296 +++++++------- .../policy/overloaded/vluxseg5ei16.c | 208 +++++----- .../policy/overloaded/vluxseg5ei32.c | 208 +++++----- .../policy/overloaded/vluxseg5ei64.c | 208 +++++----- .../policy/overloaded/vluxseg5ei8.c | 208 +++++----- .../policy/overloaded/vluxseg6ei16.c | 208 +++++----- .../policy/overloaded/vluxseg6ei32.c | 208 +++++----- .../policy/overloaded/vluxseg6ei64.c | 208 +++++----- .../policy/overloaded/vluxseg6ei8.c | 208 +++++----- .../policy/overloaded/vluxseg7ei16.c | 208 +++++----- .../policy/overloaded/vluxseg7ei32.c | 208 +++++----- .../policy/overloaded/vluxseg7ei64.c | 208 +++++----- .../policy/overloaded/vluxseg7ei8.c | 208 +++++----- .../policy/overloaded/vluxseg8ei16.c | 208 +++++----- .../policy/overloaded/vluxseg8ei32.c | 208 +++++----- .../policy/overloaded/vluxseg8ei64.c | 208 +++++----- .../policy/overloaded/vluxseg8ei8.c | 208 +++++----- clang/utils/TableGen/RISCVVEmitter.cpp | 61 ++- 561 files changed, 32120 insertions(+), 32087 deletions(-) diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg2ei16.c index 3c92b6865097df..e3a627bce6931d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg2ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2(const 
__bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m1x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2(const __bf16 *rs1, vuint16m2_t rs2, @@ -55,7 +55,7 @@ vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2(const __bf16 *rs1, vuint16m2_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) 
[[TMP0]] // vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2(const __bf16 *rs1, vuint16m4_t rs2, @@ -66,7 +66,7 @@ vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2(const __bf16 *rs1, vuint16m4_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, @@ -78,7 +78,7 @@ vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, @@ -90,7 +90,7 @@ vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m1x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, @@ -101,7 +101,7 @@ vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 
3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, @@ -112,7 +112,7 @@ vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg3ei16.c index ff506a20cc2630..feb9dcd24a51a0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg3ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf4x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t 
test_vloxseg3ei16_v_bf16mf2x3(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m1x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3(const __bf16 *rs1, vuint16m2_t rs2, @@ -55,7 +55,7 @@ vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3(const __bf16 *rs1, vuint16m2_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf4x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, @@ -67,7 +67,7 @@ vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, @@ -79,7 +79,7 @@ vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m1x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, @@ -90,7 +90,7 @@ vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg4ei16.c index ad118d5a33f7a5..f136e3665c2b41 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg4ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf4x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t 
test_vloxseg4ei16_v_bf16mf4x4(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m1x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4(const __bf16 *rs1, vuint16m2_t rs2, @@ -55,7 +55,7 @@ vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4(const __bf16 *rs1, vuint16m2_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf4x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], 
[[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, @@ -67,7 +67,7 @@ vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, @@ -79,7 +79,7 @@ vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m1x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, @@ -90,7 +90,7 @@ vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg5ei16.c index 931194870ad4fb..84b4a800cc96b1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg5ei16.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg5ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf4x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf2x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16m1x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf4x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, 
i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf2x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16m1x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg6ei16.c index d2ac1ef8fe0ad8..01b393fb260c21 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg6ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf4x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vloxseg6ei16_v_bf16mf2x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16m1x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf4x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf2x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, @@ 
-68,7 +68,7 @@ vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16m1x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg7ei16.c index d541ed377013c3..8b08ce64519ea0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg7ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf4x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf2x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16m1x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf4x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf2x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16m1x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg8ei16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg8ei16.c index 69de1bfd4e630f..bf143bf385dbd3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg8ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf4x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf2x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16m1x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf4x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 
[[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf2x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16m1x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg2e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg2e16ff.c index 6c9749e61ca0b9..c22a7e26a3e2b7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg2e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg2e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -25,7 +25,7 @@ vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -39,7 +39,7 @@ vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m1x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -53,7 +53,7 @@ vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -67,7 +67,7 @@ vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m4x2( // CHECK-RV64-SAME: ptr noundef 
[[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -81,7 +81,7 @@ vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -95,7 +95,7 @@ vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_m(vbool64_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -109,7 +109,7 @@ vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_m(vbool32_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m1x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, 
ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -123,7 +123,7 @@ vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -137,7 +137,7 @@ vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg3e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg3e16ff.c index 78085cbcf0b9c2..77b4ccafa4d835 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg3e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg3e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf4x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -25,7 +25,7 @@ vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -39,7 +39,7 @@ vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m1x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -53,7 +53,7 @@ vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -67,7 +67,7 @@ vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf4x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -81,7 +81,7 @@ vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_m(vbool64_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -95,7 +95,7 @@ vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_m(vbool32_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m1x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -109,7 +109,7 @@ 
vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg4e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg4e16ff.c index d194cebd20f5a2..cc395b1124e686 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg4e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg4e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf4x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -25,7 +25,7 @@ vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], 
align 8 @@ -39,7 +39,7 @@ vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m1x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -53,7 +53,7 @@ vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -67,7 +67,7 @@ vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf4x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -81,7 +81,7 @@ vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_m(vbool64_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -95,7 +95,7 @@ vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_m(vbool32_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m1x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -109,7 +109,7 @@ vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg5e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg5e16ff.c index 6be747289efb25..2295b9f86f15c8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg5e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg5e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf4x5( // 
CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -25,7 +25,7 @@ vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf2x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -39,7 +39,7 @@ vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16m1x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -53,7 +53,7 @@ vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf4x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -67,7 +67,7 @@ vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_m(vbool64_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf2x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -81,7 +81,7 @@ vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_m(vbool32_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16m1x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg6e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg6e16ff.c index 469a116ace864f..9a92c904781cde 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg6e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg6e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf4x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -25,7 +25,7 @@ vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf2x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -39,7 +39,7 @@ vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16m1x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -53,7 +53,7 @@ vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf4x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -67,7 +67,7 @@ vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_m(vbool64_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf2x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -81,7 +81,7 @@ vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_m(vbool32_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16m1x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg7e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg7e16ff.c index 903f9fe057d32f..84dfb9378ac2e8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg7e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg7e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf4x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } 
@llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -25,7 +25,7 @@ vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf2x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -39,7 +39,7 @@ vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16m1x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -53,7 +53,7 @@ vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf4x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -67,7 +67,7 @@ vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_m(vbool64_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf2x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -81,7 +81,7 @@ vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_m(vbool32_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16m1x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg8e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg8e16ff.c index 70e5ce95ba0c7c..49b4cd08ea38d1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg8e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg8e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf4x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -25,7 +25,7 @@ vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf2x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -39,7 +39,7 @@ vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16m1x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -53,7 +53,7 @@ vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf4x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -67,7 +67,7 @@ vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_m(vbool64_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf2x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr 
noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -81,7 +81,7 @@ vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_m(vbool32_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16m1x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg2ei16.c index 6d408c1df68646..f525c20a0610fa 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg2ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m1x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2(const __bf16 *rs1, vuint16m2_t rs2, @@ -55,7 +55,7 @@ vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2(const __bf16 *rs1, vuint16m2_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2(const __bf16 *rs1, vuint16m4_t rs2, @@ -66,7 +66,7 @@ vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2(const __bf16 *rs1, vuint16m4_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf4x2_m( // 
CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, @@ -78,7 +78,7 @@ vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, @@ -90,7 +90,7 @@ vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m1x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, @@ -101,7 +101,7 @@ vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, @@ -112,7 +112,7 @@ vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg3ei16.c index 0cf8ae8a74e478..0713bc159f4e0d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg3ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf4x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m1x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], 
[[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3(const __bf16 *rs1, vuint16m2_t rs2, @@ -55,7 +55,7 @@ vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3(const __bf16 *rs1, vuint16m2_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf4x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, @@ -67,7 +67,7 @@ vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, @@ -79,7 +79,7 @@ vbfloat16mf2x3_t 
test_vluxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m1x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, @@ -90,7 +90,7 @@ vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg4ei16.c index 2a3d4262567b36..96964f51031e69 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg4ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf4x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m1x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4(const __bf16 *rs1, vuint16m2_t rs2, @@ -55,7 +55,7 @@ vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4(const __bf16 *rs1, vuint16m2_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf4x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, @@ -67,7 +67,7 @@ vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf2x4_m( // 
CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, @@ -79,7 +79,7 @@ vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m1x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, @@ -90,7 +90,7 @@ vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg5ei16.c index e6d3b53b26165e..cd980f2787746d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg5ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf4x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf2x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16m1x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf4x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf2x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16m1x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg6ei16.c index 8e666defb287be..9ef9cf46e6d64f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg6ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf4x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf2x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16m1x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf4x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf2x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16m1x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg7ei16.c index 4eec8dd2e08d93..dd848fea479222 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg7ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf4x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf2x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16m1x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf4x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf2x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16m1x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg8ei16.c index a5f0a59097564b..13c4b05b2625a2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg8ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf4x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf2x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16m1x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf4x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, @@ -56,7 +56,7 @@ 
vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf2x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16m1x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg2ei16.c index 46fac12e6d7e3d..6b16ed734f3f3a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg2ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16mf4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16mf4x2(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsoxseg2ei16_v_bf16mf4x2(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16mf2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16mf2x2(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsoxseg2ei16_v_bf16mf2x2(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16m1x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16m1x2(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsoxseg2ei16_v_bf16m1x2(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16m2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16m2x2(__bf16 *rs1, vuint16m2_t vs2, @@ -55,7 +55,7 @@ void test_vsoxseg2ei16_v_bf16m2x2(__bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16m4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16m4x2(__bf16 *rs1, vuint16m4_t vs2, @@ -66,7 +66,7 @@ void test_vsoxseg2ei16_v_bf16m4x2(__bf16 *rs1, vuint16m4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16mf4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, @@ -78,7 +78,7 @@ void test_vsoxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16mf2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, @@ -90,7 +90,7 @@ void test_vsoxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16m1x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, @@ -101,7 +101,7 @@ void test_vsoxseg2ei16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16m2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, @@ -112,7 +112,7 @@ void test_vsoxseg2ei16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16m4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, vuint16m4_t vs2, diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg3ei16.c index 54cfa7aa666ca6..f6d0abab5dbb9e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg3ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16mf4x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16mf4x3(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsoxseg3ei16_v_bf16mf4x3(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16mf2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16mf2x3(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsoxseg3ei16_v_bf16mf2x3(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16m1x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16m1x3(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsoxseg3ei16_v_bf16m1x3(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16m2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16m2x3(__bf16 *rs1, vuint16m2_t vs2, @@ -55,7 +55,7 
@@ void test_vsoxseg3ei16_v_bf16m2x3(__bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16mf4x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, @@ -67,7 +67,7 @@ void test_vsoxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16mf2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, @@ -79,7 +79,7 @@ void test_vsoxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16m1x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, @@ -90,7 +90,7 @@ void test_vsoxseg3ei16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16m2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg4ei16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg4ei16.c index 76fb573f661a55..bbfa3da5fd6040 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg4ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16mf4x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16mf4x4(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsoxseg4ei16_v_bf16mf4x4(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16mf2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16mf2x4(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsoxseg4ei16_v_bf16mf2x4(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16m1x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16m1x4(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsoxseg4ei16_v_bf16m1x4(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16m2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16m2x4(__bf16 *rs1, vuint16m2_t vs2, @@ -55,7 +55,7 @@ void test_vsoxseg4ei16_v_bf16m2x4(__bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local 
void @test_vsoxseg4ei16_v_bf16mf4x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, @@ -67,7 +67,7 @@ void test_vsoxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16mf2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, @@ -79,7 +79,7 @@ void test_vsoxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16m1x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, @@ -90,7 +90,7 @@ void test_vsoxseg4ei16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16m2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg5ei16.c index 7a24b054e2b1cc..5061bbdcfab528 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg5ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_bf16mf4x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_bf16mf4x5(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsoxseg5ei16_v_bf16mf4x5(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_bf16mf2x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_bf16mf2x5(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsoxseg5ei16_v_bf16mf2x5(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_bf16m1x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_bf16m1x5(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsoxseg5ei16_v_bf16m1x5(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_bf16mf4x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsoxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_bf16mf2x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], 
[[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsoxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_bf16m1x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg6ei16.c index e382a85b15527f..4557b91d5b680d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg6ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_bf16mf4x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_bf16mf4x6(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsoxseg6ei16_v_bf16mf4x6(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_bf16mf2x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_bf16mf2x6(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsoxseg6ei16_v_bf16mf2x6(__bf16 *rs1, vuint16mf2_t vs2, // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_bf16m1x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_bf16m1x6(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsoxseg6ei16_v_bf16m1x6(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_bf16mf4x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsoxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_bf16mf2x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsoxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_bf16m1x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg7ei16.c index 92258de5fd3542..b5cbcce6341fe0 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg7ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_bf16mf4x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_bf16mf4x7(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsoxseg7ei16_v_bf16mf4x7(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_bf16mf2x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_bf16mf2x7(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsoxseg7ei16_v_bf16mf2x7(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_bf16m1x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_bf16m1x7(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsoxseg7ei16_v_bf16m1x7(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_bf16mf4x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsoxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_bf16mf2x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], 
[[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsoxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_bf16m1x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg8ei16.c index 96c3995c370cff..686da3f59edf27 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg8ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_bf16mf4x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_bf16mf4x8(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsoxseg8ei16_v_bf16mf4x8(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_bf16mf2x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_bf16mf2x8(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsoxseg8ei16_v_bf16mf2x8(__bf16 *rs1, vuint16mf2_t vs2, // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_bf16m1x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_bf16m1x8(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsoxseg8ei16_v_bf16m1x8(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_bf16mf4x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsoxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_bf16mf2x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsoxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_bf16m1x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg2ei16.c index c2cf18ed9de7d9..0265d1de9c1121 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg2ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16mf4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16mf4x2(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsuxseg2ei16_v_bf16mf4x2(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16mf2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16mf2x2(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsuxseg2ei16_v_bf16mf2x2(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16m1x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16m1x2(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsuxseg2ei16_v_bf16m1x2(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16m2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16m2x2(__bf16 *rs1, vuint16m2_t vs2, @@ -55,7 +55,7 @@ void test_vsuxseg2ei16_v_bf16m2x2(__bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16m4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16m4x2(__bf16 *rs1, vuint16m4_t vs2, @@ -66,7 +66,7 @@ void test_vsuxseg2ei16_v_bf16m4x2(__bf16 *rs1, vuint16m4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16mf4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, @@ -78,7 +78,7 @@ void test_vsuxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16mf2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, @@ -90,7 +90,7 @@ void test_vsuxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16m1x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, @@ -101,7 +101,7 @@ void test_vsuxseg2ei16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16m2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], 
[[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, @@ -112,7 +112,7 @@ void test_vsuxseg2ei16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16m4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, vuint16m4_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg3ei16.c index 5cce8486d4aa15..11ac764b0b63f2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg3ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16mf4x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16mf4x3(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsuxseg3ei16_v_bf16mf4x3(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16mf2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16mf2x3(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsuxseg3ei16_v_bf16mf2x3(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16m1x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16m1x3(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsuxseg3ei16_v_bf16m1x3(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16m2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16m2x3(__bf16 *rs1, vuint16m2_t vs2, @@ -55,7 +55,7 @@ void test_vsuxseg3ei16_v_bf16m2x3(__bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16mf4x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, @@ -67,7 +67,7 @@ void test_vsuxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16mf2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, @@ -79,7 +79,7 @@ void test_vsuxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16m1x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], 
ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, @@ -90,7 +90,7 @@ void test_vsuxseg3ei16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16m2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg4ei16.c index 5b8c6e82ca3243..951f4964f7180d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg4ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16mf4x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16mf4x4(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsuxseg4ei16_v_bf16mf4x4(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16mf2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16mf2x4(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsuxseg4ei16_v_bf16mf2x4(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16m1x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16m1x4(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsuxseg4ei16_v_bf16m1x4(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16m2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16m2x4(__bf16 *rs1, vuint16m2_t vs2, @@ -55,7 +55,7 @@ void test_vsuxseg4ei16_v_bf16m2x4(__bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16mf4x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, @@ -67,7 +67,7 @@ void test_vsuxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16mf2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, @@ -79,7 +79,7 @@ void test_vsuxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16m1x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, @@ -90,7 +90,7 @@ 
void test_vsuxseg4ei16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16m2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg5ei16.c index 9095d946fd8584..ecf92d16a355b8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg5ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_bf16mf4x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_bf16mf4x5(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsuxseg5ei16_v_bf16mf4x5(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_bf16mf2x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_bf16mf2x5(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsuxseg5ei16_v_bf16mf2x5(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_bf16m1x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // 
void test_vsuxseg5ei16_v_bf16m1x5(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsuxseg5ei16_v_bf16m1x5(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_bf16mf4x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsuxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_bf16mf2x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsuxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_bf16m1x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg6ei16.c index 5119250adb9e1d..c1d0c909071678 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg6ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_bf16mf4x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_bf16mf4x6(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsuxseg6ei16_v_bf16mf4x6(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_bf16mf2x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_bf16mf2x6(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsuxseg6ei16_v_bf16mf2x6(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_bf16m1x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_bf16m1x6(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsuxseg6ei16_v_bf16m1x6(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_bf16mf4x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsuxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_bf16mf2x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsuxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_bf16m1x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg7ei16.c index 056fa42d3dd224..cc30b4e72fedd7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg7ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_bf16mf4x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_bf16mf4x7(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsuxseg7ei16_v_bf16mf4x7(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_bf16mf2x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_bf16mf2x7(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsuxseg7ei16_v_bf16mf2x7(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_bf16m1x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_bf16m1x7(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ 
void test_vsuxseg7ei16_v_bf16m1x7(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_bf16mf4x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsuxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_bf16mf2x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsuxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_bf16m1x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg8ei16.c index eab39f75ba8e77..98a01c286cdabe 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg8ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_bf16mf4x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) 
[[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_bf16mf4x8(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsuxseg8ei16_v_bf16mf4x8(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_bf16mf2x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_bf16mf2x8(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsuxseg8ei16_v_bf16mf2x8(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_bf16m1x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_bf16m1x8(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsuxseg8ei16_v_bf16m1x8(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_bf16mf4x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsuxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_bf16mf2x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsuxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_bf16m1x8_m( // CHECK-RV64-SAME: 
[[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei16.c index 8253b461af7df4..6ddc795e0bb95e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2(const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2(const _Float16 *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2(const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2(const _Float16 *base, vuint16m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2(const float *base, vuint16m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2(const float *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2(const float *base, vuint16m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2(const float *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2(const double *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2(const double *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2(const double *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // 
vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei16_v_i8m1x2(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vloxseg2ei16_v_i8m1x2(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei16_v_i8m2x2(const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vloxseg2ei16_v_i8m2x2(const int8_t *base, vuint16m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei16_v_i8m4x2(const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vloxseg2ei16_v_i8m4x2(const int8_t *base, vuint16m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei16_v_i16m1x2(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vloxseg2ei16_v_i16m1x2(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei16_v_i16m2x2(const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vloxseg2ei16_v_i16m2x2(const int16_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei16_v_i16m4x2(const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vloxseg2ei16_v_i16m4x2(const int16_t *base, vuint16m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei16_v_i32m1x2(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vloxseg2ei16_v_i32m1x2(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", 
, 2) @test_vloxseg2ei16_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei16_v_i32m2x2(const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vloxseg2ei16_v_i32m2x2(const int32_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei16_v_i32m4x2(const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vloxseg2ei16_v_i32m4x2(const int32_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei16_v_i64m1x2(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vloxseg2ei16_v_i64m1x2(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t 
test_vloxseg2ei16_v_i64m2x2(const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vloxseg2ei16_v_i64m2x2(const int64_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei16_v_i64m4x2(const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vloxseg2ei16_v_i64m4x2(const int64_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2(const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2(const uint8_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2(const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2(const uint8_t *base, vuint16m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2(const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2(const uint16_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2(const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2(const uint16_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) 
[[TMP0]] // vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2(const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2(const uint32_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2(const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2(const uint32_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2(const uint64_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2(const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2(const uint64_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_m(vbool16_t 
mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_m(vbool4_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_m(vbool8_t mask, const float *base, vuint16m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_m(vbool8_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_m(vbool16_t mask, const double *base, vuint16m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t 
test_vloxseg2ei16_v_i16mf2x2_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_m(vbool4_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_m(vbool8_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_m(vbool16_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t 
test_vloxseg2ei16_v_u16mf4x2_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, 
i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_m(vbool32_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei32.c index b43b6266e2b3ef..a4f20db865bbf5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2(const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2(const _Float16 *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2(const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2(const _Float16 *base, vuint32m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
2) @test_vloxseg2ei32_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2(const float *base, vuint32m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2(const float *base, vuint32m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2(const float *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2(const float *base, vuint32m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2(const float *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t 
test_vloxseg2ei32_v_f64m1x2(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2(const double *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2(const double *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2(const double *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2(const double *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei32_v_i8m1x2(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vloxseg2ei32_v_i8m1x2(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei32_v_i8m2x2(const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vloxseg2ei32_v_i8m2x2(const int8_t *base, vuint32m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei32_v_i16m1x2(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16m1x2_t test_vloxseg2ei32_v_i16m1x2(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei32_v_i16m2x2(const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m2x2_t test_vloxseg2ei32_v_i16m2x2(const int16_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei32_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei32_v_i16m4x2(const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m4x2_t test_vloxseg2ei32_v_i16m4x2(const int16_t *base, vuint32m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei32_v_i32m1x2(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32m1x2_t test_vloxseg2ei32_v_i32m1x2(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t 
test_vloxseg2ei32_v_i32m2x2(const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m2x2_t test_vloxseg2ei32_v_i32m2x2(const int32_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei32_v_i32m4x2(const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m4x2_t test_vloxseg2ei32_v_i32m4x2(const int32_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei32_v_i64m1x2(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint64m1x2_t test_vloxseg2ei32_v_i64m1x2(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei32_v_i64m2x2(const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m2x2_t test_vloxseg2ei32_v_i64m2x2(const int64_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei32_v_i64m4x2(const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m4x2_t test_vloxseg2ei32_v_i64m4x2(const int64_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2(const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2(const uint8_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
2) @test_vloxseg2ei32_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2(const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2(const uint16_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2(const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2(const uint16_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t 
test_vloxseg2ei32_v_u32mf2x2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2(const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2(const uint32_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2(const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2(const uint32_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2(const uint64_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2(const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2(const uint64_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_m(vbool4_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32m2x2_t 
test_vloxseg2ei32_v_f32m2x2_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_m(vbool8_t mask, const float *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_m(vbool8_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_m(vbool16_t mask, const double *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_m(vbool4_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_m(vbool16_t 
mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_m(vbool16_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint32m2x2_t 
test_vloxseg2ei32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_m(vbool32_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei64.c index 7d7b13a8b03947..cb708667967f1d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2(const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2(const _Float16 *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2(const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2(const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2(const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2(const float *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2(const float *base, vuint64m8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2(const float *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2(const double *base, vuint64m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2(const double *base, vuint64m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2(const double *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei64_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2(const double *base, vuint64m4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2(const double *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t 
test_vloxseg2ei64_v_i8mf2x2(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei64_v_i8m1x2(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8m1x2_t test_vloxseg2ei64_v_i8m1x2(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei64_v_i16m1x2(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m1x2_t test_vloxseg2ei64_v_i16m1x2(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei64_v_i16m2x2(const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16m2x2_t test_vloxseg2ei64_v_i16m2x2(const int16_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei64_v_i32m1x2(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m1x2_t test_vloxseg2ei64_v_i32m1x2(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei64_v_i32m2x2(const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint32m2x2_t test_vloxseg2ei64_v_i32m2x2(const int32_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei64_v_i32m4x2(const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32m4x2_t test_vloxseg2ei64_v_i32m4x2(const int32_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei64_v_i64m1x2(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint64m1x2_t test_vloxseg2ei64_v_i64m1x2(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei64_v_i64m2x2(const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint64m2x2_t test_vloxseg2ei64_v_i64m2x2(const int64_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei64_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei64_v_i64m4x2(const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint64m4x2_t test_vloxseg2ei64_v_i64m4x2(const int64_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t 
test_vloxseg2ei64_v_u8mf2x2(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2(const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2(const uint16_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2(const uint32_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2(const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2(const uint32_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2(const uint64_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2(const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2(const uint64_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat32m1x2_t 
test_vloxseg2ei64_v_f32m1x2_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_m(vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_m(vbool8_t mask, const float *base, vuint64m8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_m(vbool8_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_m(vbool16_t mask, const double *base, vuint64m4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_m(vbool8_t mask, 
const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_m(vbool16_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint64m2x2_t 
test_vloxseg2ei64_v_u64m2x2_m(vbool32_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei8.c index 84d421b8a31817..ee1074d713b559 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2(const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2(const _Float16 *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2(const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2(const _Float16 *base, vuint8m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2(const 
float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2(const float *base, vuint8mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2(const float *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2(const float *base, vuint8m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2(const float *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2(const double *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2(const double *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2(const double *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2(const double *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei8_v_i8m1x2(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vloxseg2ei8_v_i8m1x2(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei8_v_i8m2x2(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vloxseg2ei8_v_i8m2x2(const int8_t *base, vuint8m2_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei8_v_i8m4x2(const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vloxseg2ei8_v_i8m4x2(const int8_t *base, vuint8m4_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei8_v_i16m1x2(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vloxseg2ei8_v_i16m1x2(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei8_v_i16m2x2(const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vloxseg2ei8_v_i16m2x2(const int16_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei8_v_i16m4x2(const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vloxseg2ei8_v_i16m4x2(const int16_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t 
test_vloxseg2ei8_v_i32m1x2(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vloxseg2ei8_v_i32m1x2(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei8_v_i32m2x2(const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vloxseg2ei8_v_i32m2x2(const int32_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei8_v_i32m4x2(const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vloxseg2ei8_v_i32m4x2(const int32_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei8_v_i64m1x2(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vloxseg2ei8_v_i64m1x2(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei8_v_i64m2x2(const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vloxseg2ei8_v_i64m2x2(const int64_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei8_v_i64m4x2(const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vloxseg2ei8_v_i64m4x2(const int64_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2(const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2(const uint8_t *base, vuint8m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2(const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2(const uint8_t *base, vuint8m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei8_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2(const 
uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2(const uint16_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2(const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2(const uint16_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2(const uint32_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2(const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2(const uint32_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2(const uint64_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2(const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2(const uint64_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t 
test_vloxseg2ei8_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_m(vbool16_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_m(vbool8_t mask, const float *base, vuint8m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_m(vbool8_t mask, const float *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_m(vbool16_t mask, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf4x2_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_m(vbool32_t mask, const 
int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_m(vbool16_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_m(vbool8_t mask, const 
int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_m(vbool32_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_m(vbool16_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 
3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_m(vbool16_t mask, 
const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 
2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei16.c
index fc6e0cd13a5d4e..96b7a04ccdbb9f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei16.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf4x3
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
//
vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3(const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3(const _Float16 *base, vuint16mf4_t
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf2x3
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
//
vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3(const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
@@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3(const _Float16 *base, vuint16mf2_t
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m1x3
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]],
i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3(const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3(const _Float16 *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3(const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3(const float *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3(const double *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei16_v_i8m1x3(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei16_v_i8m1x3(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei16_v_i8m2x3(const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vloxseg3ei16_v_i8m2x3(const int8_t *base, 
vuint16m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei16_v_i16m1x3(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vloxseg3ei16_v_i16m1x3(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei16_v_i16m2x3(const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vloxseg3ei16_v_i16m2x3(const int16_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei16_v_i32m1x3(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vloxseg3ei16_v_i32m1x3(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei16_v_i32m2x3(const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vloxseg3ei16_v_i32m2x3(const int32_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei16_v_i64m1x3(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vloxseg3ei16_v_i64m1x3(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei16_v_i64m2x3(const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vloxseg3ei16_v_i64m2x3(const int64_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3(const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3(const uint8_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3(const uint16_t *base, 
vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3(const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3(const uint16_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3(const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3(const uint32_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3(const uint64_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -410,7 +410,7 
@@ vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_m(vbool32_t 
mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei32.c index 8b591fd5de4d78..907f13b9533f17 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3(const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3(const _Float16 *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3(const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3(const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3(const float *base, vuint32m2_t bindex // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3(const double *base, vuint32m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3(const double *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) 
[[TMP0]] // vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei32_v_i8m1x3(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei32_v_i8m1x3(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei32_v_i8m2x3(const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vloxseg3ei32_v_i8m2x3(const int8_t *base, vuint32m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei32_v_i16m1x3(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vloxseg3ei32_v_i16m1x3(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei32_v_i16m2x3(const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vloxseg3ei32_v_i16m2x3(const int16_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei32_v_i32m1x3(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vloxseg3ei32_v_i32m1x3(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei32_v_i32m2x3(const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vloxseg3ei32_v_i32m2x3(const int32_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei32_v_i64m1x3(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vloxseg3ei32_v_i64m1x3(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei32_v_i64m2x3(const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vloxseg3ei32_v_i64m2x3(const int64_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] 
// vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3(const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3(const uint8_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3(const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3(const uint16_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3(const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3(const uint32_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3(const uint64_t *base, vuint32m1_t bind // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t 
test_vloxseg3ei32_v_f32m2x3_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei32_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ 
-620,7 +620,7 @@ vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, 
i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei64.c index 084ad1bf25588b..2e7eb28f95b48c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3(const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t 
test_vloxseg3ei64_v_f16m2x3(const _Float16 *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3(const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3(const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3(const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3(const float *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3(const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3(const double *base, vuint64m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3(const double *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei64_v_i8m1x3(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei64_v_i8m1x3(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei64_v_i16m1x3(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x3_t test_vloxseg3ei64_v_i16m1x3(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei64_v_i16m2x3(const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x3_t test_vloxseg3ei64_v_i16m2x3(const int16_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei64_v_i32m1x3(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32m1x3_t test_vloxseg3ei64_v_i32m1x3(const 
int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei64_v_i32m2x3(const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x3_t test_vloxseg3ei64_v_i32m2x3(const int32_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei64_v_i64m1x3(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x3_t test_vloxseg3ei64_v_i64m1x3(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei64_v_i64m2x3(const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x3_t test_vloxseg3ei64_v_i64m2x3(const int64_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3(const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3(const uint16_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3(const uint32_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3(const 
uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3(const uint64_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], 
i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_m(vbool32_t mask, const float *base, vuint64m2_t 
bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_m(vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_m(vbool64_t mask, 
const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 
4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
//
vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
@@ -700,7 +700,7 @@ vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_m(vbool64_t mask, const uint64_t *base
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m2x3_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
//
vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei8.c
index 7b9f719808d2f2..fa14ae29e1aa19 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei8.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf4x3
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
//
vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3(const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3(const _Float16 *base, vuint8mf8_t bi
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf2x3
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
//
vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3(const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
@@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3(const _Float16 *base, vuint8mf4_t bi
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m1x3
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
//
vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3(const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
@@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3(const _Float16 *base, vuint8mf2_t bind
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m2x3
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
//
vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3(const _Float16 *base, vuint8m1_t bindex, size_t vl) {
@@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3(const _Float16 *base, vuint8m1_t binde
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32mf2x3
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
//
vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3(const float *base, vuint8mf8_t bindex, size_t vl) {
@@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3(const float *base, vuint8mf8_t binde
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3)
@test_vloxseg3ei8_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3(const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3(const float *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3(const double *base, 
vuint8mf4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3(const double *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei8_v_i8m1x3(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei8_v_i8m1x3(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei8_v_i8m2x3(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vloxseg3ei8_v_i8m2x3(const int8_t *base, vuint8m2_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei8_v_i16m1x3(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vloxseg3ei8_v_i16m1x3(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei8_v_i16m2x3(const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vloxseg3ei8_v_i16m2x3(const int16_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei8_v_i32m1x3(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vloxseg3ei8_v_i32m1x3(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei8_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei8_v_i32m2x3(const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vloxseg3ei8_v_i32m2x3(const int32_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei8_v_i64m1x3(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vloxseg3ei8_v_i64m1x3(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei8_v_i64m2x3(const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vloxseg3ei8_v_i64m2x3(const int64_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3(const uint8_t 
*base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3(const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3(const uint8_t *base, vuint8m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3(const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3(const uint16_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3(const uint32_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei8_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3(const uint64_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei8_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_m(vbool16_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t 
test_vloxseg3ei8_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: 
ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_m(vbool16_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_m(vbool32_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_m(vbool64_t mask, const 
uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_m(vbool64_t 
mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei16.c index 40bfd8ed979d6a..dc480a09f99327 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei16_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4(const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4(const _Float16 *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t 
test_vloxseg4ei16_v_f32m1x4(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4(const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4(const float *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4(const double *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei16_v_i8m1x4(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei16_v_i8m1x4(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei16_v_i8m2x4(const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vloxseg4ei16_v_i8m2x4(const int8_t *base, vuint16m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei16_v_i16m1x4(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vloxseg4ei16_v_i16m1x4(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei16_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei16_v_i16m2x4(const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vloxseg4ei16_v_i16m2x4(const int16_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei16_v_i32m1x4(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vloxseg4ei16_v_i32m1x4(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t 
test_vloxseg4ei16_v_i32m2x4(const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vloxseg4ei16_v_i32m2x4(const int32_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei16_v_i64m1x4(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vloxseg4ei16_v_i64m1x4(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei16_v_i64m2x4(const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vloxseg4ei16_v_i64m2x4(const int64_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4(const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4(const uint8_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4(const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4(const uint16_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4(const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4(const uint32_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // 
vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4(const uint64_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 
5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_m(vbool32_t 
mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t 
test_vloxseg4ei16_v_u32mf2x4_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei32.c index 2fdce827defcf8..4f9533c04c4b40 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4(const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4(const _Float16 *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4(const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4(const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4(const float *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4(const double *base, vuint32m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4(const double *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei32_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei32_v_i8m1x4(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei32_v_i8m1x4(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei32_v_i8m2x4(const 
int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vloxseg4ei32_v_i8m2x4(const int8_t *base, vuint32m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei32_v_i16m1x4(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vloxseg4ei32_v_i16m1x4(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei32_v_i16m2x4(const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vloxseg4ei32_v_i16m2x4(const int16_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei32_v_i32m1x4(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vloxseg4ei32_v_i32m1x4(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei32_v_i32m2x4(const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vloxseg4ei32_v_i32m2x4(const int32_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei32_v_i64m1x4(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vloxseg4ei32_v_i64m1x4(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei32_v_i64m2x4(const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vloxseg4ei32_v_i64m2x4(const int64_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei32_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4(const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4(const uint8_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t 
test_vloxseg4ei32_v_u16mf4x4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4(const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4(const uint16_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4(const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4(const uint32_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4(const uint64_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_m(vbool8_t mask, const int8_t 
*base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", 
, 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t 
test_vloxseg4ei32_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", 
, 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei32_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) 
{ diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei64.c index 456989831fa0d2..02135ec6340444 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4(const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4(const _Float16 *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4(const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4(const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4(const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4(const float *base, vuint64m4_t bindex // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4(const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4(const double *base, vuint64m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4(const double *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei64_v_i8m1x4(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei64_v_i8m1x4(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei64_v_i16m1x4(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x4_t test_vloxseg4ei64_v_i16m1x4(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei64_v_i16m2x4(const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x4_t test_vloxseg4ei64_v_i16m2x4(const int16_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei64_v_i32m1x4(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32m1x4_t test_vloxseg4ei64_v_i32m1x4(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei64_v_i32m2x4(const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x4_t test_vloxseg4ei64_v_i32m2x4(const int32_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei64_v_i64m1x4(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x4_t test_vloxseg4ei64_v_i64m1x4(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei64_v_i64m2x4(const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x4_t test_vloxseg4ei64_v_i64m2x4(const int64_t *base, vuint64m2_t bindex // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4(const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4(const uint16_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4(const uint32_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4(const uint64_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // 
vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_m(vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_m(vbool16_t mask, 
const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x4_t 
test_vloxseg4ei64_v_u16m1x4_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei8.c index 31f49fda66ae3e..c1ad7aa3f25a42 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4(const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4(const _Float16 *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei8_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4(const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4(const float *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4(const double *base, 
vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4(const double *base, vuint8mf4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4(const double *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei8_v_i8m1x4(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei8_v_i8m1x4(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei8_v_i8m2x4(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vloxseg4ei8_v_i8m2x4(const int8_t *base, vuint8m2_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei8_v_i16m1x4(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vloxseg4ei8_v_i16m1x4(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei8_v_i16m2x4(const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vloxseg4ei8_v_i16m2x4(const int16_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei8_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei8_v_i32m1x4(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vloxseg4ei8_v_i32m1x4(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei8_v_i32m2x4(const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vloxseg4ei8_v_i32m2x4(const int32_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei8_v_i64m1x4(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vloxseg4ei8_v_i64m1x4(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei8_v_i64m2x4(const int64_t 
*base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vloxseg4ei8_v_i64m2x4(const int64_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4(const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4(const uint8_t *base, vuint8m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4(const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4(const uint16_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei8_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4(const uint32_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4(const uint64_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_m(vbool16_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_m(vbool32_t mask, const int8_t 
*base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_m(vbool8_t mask, const int16_t *base, 
vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_m(vbool16_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_m(vbool32_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: 
ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_m(vbool16_t mask, const 
uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei16.c index 0edd52db99e4c2..0b0c8a606dd98c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5(const float *base, vuint16mf2_t bindex, size_t vl) { @@ 
-60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) 
poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei16_v_i8m1x5(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei16_v_i8m1x5(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) 
poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei16_v_i16m1x5(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei16_v_i16m1x5(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei16_v_i32m1x5(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei16_v_i32m1x5(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei16_v_i64m1x5(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei16_v_i64m1x5(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t 
test_vloxseg5ei16_v_u8m1x5(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf2x5_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t 
test_vloxseg5ei16_v_i32m1x5_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei32.c index 09aec05409e5f9..6af4926f970c11 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vloxseg5ei32_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei32_v_i8m1x5(const int8_t 
*base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei32_v_i8m1x5(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei32_v_i16m1x5(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei32_v_i16m1x5(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei32_v_i32m1x5(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei32_v_i32m1x5(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei32_v_i64m1x5(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei32_v_i64m1x5(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vloxseg5ei32_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t 
test_vloxseg5ei32_v_u32m1x5(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_m(vbool64_t mask, 
const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei64.c index 69215e4c99d25e..460f0d44161746 
100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei64_v_i8m1x5(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei64_v_i8m1x5(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei64_v_i16m1x5(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei64_v_i16m1x5(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] 
// vint32m1x5_t test_vloxseg5ei64_v_i32m1x5(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei64_v_i32m1x5(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei64_v_i64m1x5(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei64_v_i64m1x5(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_m(vbool64_t mask, const 
int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf4x5_m // CHECK-RV64-SAME: 
( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ 
vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei8.c index 92e821a87589b5..711f4a2666fca4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vloxseg5ei8_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5(const float 
*base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei8_v_i8m1x5(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei8_v_i8m1x5(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei8_v_i16m1x5(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei8_v_i16m1x5(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei8_v_i32m1x5(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei8_v_i32m1x5(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei8_v_i64m1x5(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei8_v_i64m1x5(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vloxseg5ei8_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5(const uint8_t *base, 
vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, 
vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_m(vbool32_t mask, const int32_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: 
ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei16.c index 0f01011cc765fe..1fc8a91b2edef8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei16_v_i8m1x6(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei16_v_i8m1x6(const int8_t *base, 
vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei16_v_i16m1x6(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei16_v_i16m1x6(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei16_v_i32m1x6(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei16_v_i32m1x6(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei16_v_i64m1x6(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei16_v_i64m1x6(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6(const uint32_t *base, 
vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { 
@@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t 
test_vloxseg6ei16_v_u16m1x6_m(vbool16_t mask, const uint16_t *base
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32mf2x6_m
 // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]]
 //
 vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_m(vbool64_t mask, const uint32_t *ba
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32m1x6_m
 // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]]
 //
 vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_m(vbool32_t mask, const uint32_t *base
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u64m1x6_m
 // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]]
 //
 vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei32.c
index 4d2b7ae3fd4224..d3487ebf26ba72 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei32.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf4x6
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]]
 //
 vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6(const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6(const _Float16 *base, vuint32mf2_t
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf2x6
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]]
 //
 vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6(const _Float16 *base, vuint32m1_t bindex, size_t vl) {
@@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6(const _Float16 *base, vuint32m1_t b
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16m1x6
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]]
 //
 vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6(const _Float16 *base, vuint32m2_t bindex, size_t vl) {
@@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6(const _Float16 *base, vuint32m2_t bin
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32mf2x6
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5)
 // CHECK-RV64-NEXT: ret
target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei32_v_i8m1x6(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei32_v_i8m1x6(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei32_v_i16m1x6(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei32_v_i16m1x6(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei32_v_i32m1x6(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei32_v_i32m1x6(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei32_v_i64m1x6(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei32_v_i64m1x6(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) 
[[TMP0]] // vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_m(vbool64_t 
mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t 
test_vloxseg6ei32_v_u32mf2x6_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei64.c index 8805f232bd5381..d365f3b5a83f93 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vloxseg6ei64_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t 
test_vloxseg6ei64_v_f32m1x6(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei64_v_i8m1x6(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei64_v_i8m1x6(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei64_v_i16m1x6(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei64_v_i16m1x6(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei64_v_i32m1x6(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei64_v_i32m1x6(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei64_v_i64m1x6(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei64_v_i64m1x6(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vloxseg6ei64_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t 
test_vloxseg6ei64_v_u8m1x6(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_m(vbool32_t mask, 
const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", 
, 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t 
test_vloxseg6ei64_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", 
, 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vloxseg6ei64_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei8.c index 17505b79746372..88475c15b2032f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vloxseg6ei8_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei8_v_i8m1x6(const int8_t *base, vuint8m1_t 
bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei8_v_i8m1x6(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei8_v_i16m1x6(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei8_v_i16m1x6(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei8_v_i32m1x6(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei8_v_i32m1x6(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei8_v_i64m1x6(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei8_v_i64m1x6(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vloxseg6ei8_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6(const 
uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_m(vbool64_t mask, const int64_t 
*base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_m(vbool16_t mask, const 
uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei16.c index 3ac00aa0a868d3..0265ce70e3ff0b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei16.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei16_v_i8m1x7(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei16_v_i8m1x7(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vloxseg7ei16_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei16_v_i16m1x7(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei16_v_i16m1x7(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t 
test_vloxseg7ei16_v_i32m1x7(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei16_v_i32m1x7(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei16_v_i64m1x7(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei16_v_i64m1x7(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_m(vbool64_t mask, const 
int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf4x7_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ 
vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_m(vbool64_t mask, const uint32_t *ba
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32m1x7_m
 // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
 //
 vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_m(vbool32_t mask, const uint32_t *base
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u64m1x7_m
 // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
 //
 vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei32.c
index 227fdb257f5da3..8b6be68f395064 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei32.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf4x7
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
 //
 vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7(const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7(const _Float16 *base, vuint32mf2_t
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
7) @test_vloxseg7ei32_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t 
test_vloxseg7ei32_v_f32m1x7(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei32_v_i8m1x7(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei32_v_i8m1x7(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei32_v_i16m1x7(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei32_v_i16m1x7(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei32_v_i32m1x7(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei32_v_i32m1x7(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei32_v_i64m1x7(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei32_v_i64m1x7(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vloxseg7ei32_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t 
test_vloxseg7ei32_v_u8m1x7(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_m(vbool32_t mask, 
const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t 
test_vloxseg7ei32_v_u32m1x7_m(vbool32_t mask, const uint32_t *base
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u64m1x7_m
 // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
 //
 vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei64.c
index 3354fa4f997f74..b196967a8fa346 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei64.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf4x7
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
 //
 vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7(const _Float16 *base, vuint64m1_t bindex, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7(const _Float16 *base, vuint64m1_t b
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf2x7
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
 //
 vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7(const _Float16 *base, vuint64m2_t bindex, size_t vl) {
@@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7(const _Float16 *base, vuint64m2_t b
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16m1x7
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t 
test_vloxseg7ei64_v_f64m1x7(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei64_v_i8m1x7(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei64_v_i8m1x7(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei64_v_i16m1x7(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei64_v_i16m1x7(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei64_v_i32m1x7(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei64_v_i32m1x7(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei64_v_i64m1x7(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei64_v_i64m1x7(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7(const uint16_t 
*base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vloxseg7ei64_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 
+430,7 @@ vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei8.c index 578d3b02ca8bb3..0eb52a8167f072 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei8_v_i8m1x7(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei8_v_i8m1x7(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
7) @test_vloxseg7ei8_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei8_v_i16m1x7(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei8_v_i16m1x7(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei8_v_i32m1x7(const 
int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei8_v_i32m1x7(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei8_v_i64m1x7(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei8_v_i64m1x7(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vloxseg7ei8_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ 
vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_m(vbool64_t 
mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei16.c index 64ff6caf73bdef..5bcaf76ebbb28f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8(const float *base, vuint16mf2_t bindex, size_t vl) { @@ 
-60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) 
poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei16_v_i8m1x8(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei16_v_i8m1x8(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) 
poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei16_v_i16m1x8(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei16_v_i16m1x8(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei16_v_i32m1x8(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei16_v_i32m1x8(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei16_v_i64m1x8(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei16_v_i64m1x8(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t 
test_vloxseg8ei16_v_u8m1x8(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf2x8_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t 
test_vloxseg8ei16_v_i32m1x8_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei32.c index 7641a5968f0c63..cc47569df80dc6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vloxseg8ei32_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei32_v_i8m1x8(const int8_t 
*base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei32_v_i8m1x8(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei32_v_i16m1x8(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei32_v_i16m1x8(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei32_v_i32m1x8(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei32_v_i32m1x8(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei32_v_i64m1x8(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei32_v_i64m1x8(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vloxseg8ei32_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t 
test_vloxseg8ei32_v_u32m1x8(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_m(vbool64_t mask, 
const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei64.c index 49278c40023e90..f44940e4f785f2 
100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei64_v_i8m1x8(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei64_v_i8m1x8(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei64_v_i16m1x8(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei64_v_i16m1x8(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] 
// vint32m1x8_t test_vloxseg8ei64_v_i32m1x8(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei64_v_i32m1x8(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei64_v_i64m1x8(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei64_v_i64m1x8(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_m(vbool64_t mask, const 
int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf4x8_m // CHECK-RV64-SAME: 
( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ 
vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei8.c index 87d9b95dd1d2c8..9135e43edccff2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vloxseg8ei8_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8(const float 
*base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei8_v_i8m1x8(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei8_v_i8m1x8(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei8_v_i16m1x8(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei8_v_i16m1x8(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei8_v_i32m1x8(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei8_v_i32m1x8(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei8_v_i64m1x8(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei8_v_i64m1x8(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vloxseg8ei8_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8(const uint8_t *base, 
vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, 
vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_m(vbool32_t mask, const int32_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: 
ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e16ff.c index 86bbf882d95f11..88265f5e04c07e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2(const _Float16 *base, size_t *new_vl // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2(const _Float16 *base, size_t *new_vl // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2(const _Float16 *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2(const _Float16 *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2(const _Float16 *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2(const int16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2(const int16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint16m1x2_t test_vlseg2e16ff_v_i16m1x2(const int16_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint16m2x2_t test_vlseg2e16ff_v_i16m2x2(const int16_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vlseg2e16ff_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint16m4x2_t test_vlseg2e16ff_v_i16m4x2(const int16_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2(const uint16_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2(const uint16_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2(const uint16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2(const uint16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2(const uint16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), 
i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_m(vbool16_t mask, const int16_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_m(vbool8_t mask, const int16_t *base, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_m(vbool4_t mask, const int16_t *base, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) 
poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e32ff.c index 2696673b9e3f2a..cef7749bc83942 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2(const float *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2(const float *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2(const float *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2(const float *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 
@@ -75,7 +75,7 @@ vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2(const int32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint32m1x2_t test_vlseg2e32ff_v_i32m1x2(const int32_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint32m2x2_t test_vlseg2e32ff_v_i32m2x2(const int32_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint32m4x2_t test_vlseg2e32ff_v_i32m4x2(const int32_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2(const uint32_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2(const uint32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2(const uint32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } 
[[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2(const uint32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_m(vbool32_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ 
vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_m(vbool16_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_m(vbool8_t mask, const float *base, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_m(vbool32_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m2x2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_m(vbool16_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_m(vbool8_t mask, const int32_t *base, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), 
i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -296,7 +296,7 @@ vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_m(vbool32_t mask, const uint32_t *base,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m2x2_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -309,7 +309,7 @@ vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_m(vbool16_t mask, const uint32_t *base,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m4x2_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e64ff.c
index d4088f0eff4fdd..9cf118919fd3da 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e64ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e64ff.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m1x2
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64
noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2(const double *base, size_t *new_vl, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2(const double *base, size_t *new_vl, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2(const double *base, size_t *new_vl, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint64m1x2_t test_vlseg2e64ff_v_i64m1x2(const int64_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m2x2_t test_vlseg2e64ff_v_i64m2x2(const int64_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint64m4x2_t test_vlseg2e64ff_v_i64m4x2(const int64_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint64m1x2_t 
test_vlseg2e64ff_v_u64m1x2(const uint64_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2(const uint64_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2(const uint64_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_m(vbool64_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_m(vbool32_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_m(vbool16_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], 
[[MASK]], i64 [[VL]], i64 3, i64 6)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -231,7 +231,7 @@ vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_m(vbool32_t mask, const uint64_t *base,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m4x2_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e8ff.c
index de45d9c0e093e8..baa435fbb2bf54 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e8ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e8ff.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf8x2
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -23,7 +23,7 @@ vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2(const int8_t *base, size_t *new_vl, size_
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf4x2
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2)
poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x2_t test_vlseg2e8ff_v_i8m1x2(const int8_t *base, size_t *new_vl, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint8m2x2_t test_vlseg2e8ff_v_i8m2x2(const int8_t *base, size_t *new_vl, size_t // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint8m4x2_t test_vlseg2e8ff_v_i8m4x2(const int8_t *base, size_t *new_vl, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2(const uint8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2(const uint8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2(const uint8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vlseg2e8ff_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_m(vbool8_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_m(vbool4_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_m(vbool2_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 
0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -296,7 +296,7 @@ vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, size
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m2x2_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -309,7 +309,7 @@ vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, size
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m4x2_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e16ff.c
index d49604e9db3d41..2e38ceecff2792 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e16ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e16ff.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf4x3
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 }
@llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3(const _Float16 *base, size_t *new_vl // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3(const _Float16 *base, size_t *new_vl // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3(const _Float16 *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 
0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3(const _Float16 *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3(const int16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3(const int16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint16m1x3_t test_vlseg3e16ff_v_i16m1x3(const int16_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint16m2x3_t test_vlseg3e16ff_v_i16m2x3(const int16_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3(const uint16_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3(const uint16_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3(const uint16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3(const uint16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_m(vbool64_t mask, const int16_t 
*base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_m(vbool16_t mask, const int16_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_m(vbool8_t mask, const int16_t *base, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e32ff.c index 641c2c413c0234..df2d9855169531 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3(const float *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3(const float *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3(const float *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3(const int32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint32m1x3_t test_vlseg3e32ff_v_i32m1x3(const int32_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } 
[[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint32m2x3_t test_vlseg3e32ff_v_i32m2x3(const int32_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3(const uint32_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3(const uint32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3(const uint32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_m(vbool32_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_m(vbool16_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_m(vbool32_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_m(vbool16_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e64ff.c index 21dcf0c9158864..d17acb73435c68 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3(const double *base, size_t *new_vl, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3(const double *base, size_t *new_vl, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint64m1x3_t test_vlseg3e64ff_v_i64m1x3(const int64_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } 
[[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint64m2x3_t test_vlseg3e64ff_v_i64m2x3(const int64_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3(const uint64_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3(const uint64_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vlseg3e64ff_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_m(vbool64_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_m(vbool32_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e8ff.c index 9da1efa32cddf6..fc62baa37410b6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x3_t test_vlseg3e8ff_v_i8m1x3(const int8_t *base, size_t *new_vl, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], 
i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint8m2x3_t test_vlseg3e8ff_v_i8m2x3(const int8_t *base, size_t *new_vl, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3(const uint8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3(const uint8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_m(vbool8_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_m(vbool4_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, size // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e16ff.c index ec495e90407293..cfda2cd5d87f0c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4(const _Float16 *base, size_t *new_vl // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4(const _Float16 *base, size_t *new_vl // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4(const _Float16 *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat16m2x4_t 
test_vlseg4e16ff_v_f16m2x4(const _Float16 *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4(const int16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4(const int16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint16m1x4_t test_vlseg4e16ff_v_i16m1x4(const int16_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint16m2x4_t test_vlseg4e16ff_v_i16m2x4(const int16_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4(const uint16_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4(const uint16_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } 
[[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4(const uint16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4(const uint16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_m(vbool32_t mask, const _Float16 *ba // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_m(vbool16_t mask, const int16_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_m(vbool8_t mask, const int16_t *base, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e32ff.c index faee24b3e5a416..ac403e192d2829 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4(const float *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4(const float *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4(const float *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4(const int32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint32m1x4_t test_vlseg4e32ff_v_i32m1x4(const int32_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint32m2x4_t 
test_vlseg4e32ff_v_i32m2x4(const int32_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4(const uint32_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4(const uint32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4(const uint32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_m(vbool32_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_m(vbool16_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_m(vbool32_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_m(vbool16_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e64ff.c index e8a856ae096cb3..47d2eb8ece5dc1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4(const double *base, size_t *new_vl, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4(const double *base, size_t *new_vl, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint64m1x4_t test_vlseg4e64ff_v_i64m1x4(const int64_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint64m2x4_t 
test_vlseg4e64ff_v_i64m2x4(const int64_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4(const uint64_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4(const uint64_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_m(vbool64_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_m(vbool32_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e8ff.c index bb268a09abe00d..d784020fb19453 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr 
[[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x4_t test_vlseg4e8ff_v_i8m1x4(const int8_t *base, size_t *new_vl, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint8m2x4_t test_vlseg4e8ff_v_i8m2x4(const int8_t *base, size_t *new_vl, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4(const uint8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4(const uint8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_m(vbool8_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_m(vbool4_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 
8 @@ -244,7 +244,7 @@ vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, size // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e16ff.c index f3d6ba3f9ef725..d6db2a8a24246f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 
5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5(const _Float16 *base, size_t *new_vl // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5(const _Float16 *base, size_t *new_vl // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5(const _Float16 *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5(const int16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5(const int16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16m1x5_t test_vlseg5e16ff_v_i16m1x5(const int16_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5(const uint16_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5(const uint16_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5(const uint16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat16mf2x5_t 
test_vlseg5e16ff_v_f16mf2x5_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_m(vbool16_t mask, const int16_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } 
@llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e32ff.c index 61be82853a7a67..8eb139a92e9c39 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5(const float *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5(const float *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } 
@llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5(const int32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x5_t test_vlseg5e32ff_v_i32m1x5(const int32_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5(const uint32_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 
0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5(const uint32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_m(vbool32_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint32mf2x5_t 
test_vlseg5e32ff_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_m(vbool32_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e64ff.c index 916bf4bb0cc05c..395c509859b037 100644 
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e64ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e64ff.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vlseg5e64ff_v_f64m1x5
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[BASE]], i64 [[VL]], i64 6)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -23,7 +23,7 @@ vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5(const double *base, size_t *new_vl, si
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vlseg5e64ff_v_i64m1x5
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[BASE]], i64 [[VL]], i64 6)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -36,7 +36,7 @@ vint64m1x5_t test_vlseg5e64ff_v_i64m1x5(const int64_t *base, size_t *new_vl, siz
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vlseg5e64ff_v_u64m1x5
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[BASE]], i64 [[VL]], i64 6)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -49,7 +49,7 @@ vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5(const uint64_t *base, size_t *new_vl, s
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vlseg5e64ff_v_f64m1x5_m
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3, i64 6)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -62,7 +62,7 @@ vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_m(vbool64_t mask, const double *base,
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vlseg5e64ff_v_i64m1x5_m
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3, i64 6)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -75,7 +75,7 @@ vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_m(vbool64_t mask, const int64_t *base, s
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vlseg5e64ff_v_u64m1x5_m
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3, i64 6)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e8ff.c
index 24e22388556796..d000a1b74c2e21 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e8ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e8ff.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 1 x i8>, 5)
@test_vlseg5e8ff_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x5_t test_vlseg5e8ff_v_i8m1x5(const int8_t *base, size_t *new_vl, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 
+101,7 @@ vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5(const uint8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_m(vbool8_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } 
@llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e16ff.c index c2a47414173308..77558b7673bc27 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6(const _Float16 *base, size_t *new_vl // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6(const _Float16 *base, size_t *new_vl // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6(const _Float16 *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) 
poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6(const int16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6(const int16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16m1x6_t test_vlseg6e16ff_v_i16m1x6(const int16_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6(const uint16_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6(const uint16_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6(const uint16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_m(vbool16_t mask, const int16_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e32ff.c index 257494a777077a..af2a9902c9bf6a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6(const float *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 
5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6(const float *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6(const int32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x6_t test_vlseg6e32ff_v_i32m1x6(const int32_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6(const uint32_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vlseg6e32ff_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6(const uint32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_m(vbool32_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_m(vbool32_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e64ff.c index fc84de20d20e75..0d7a2ef2e19c4f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6(const double *base, size_t *new_vl, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x6_t test_vlseg6e64ff_v_i64m1x6(const int64_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6(const uint64_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_m(vbool64_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } 
[[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e8ff.c index 64469b56357c3b..f91064648e7336 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x6_t test_vlseg6e8ff_v_i8m1x6(const int8_t *base, size_t *new_vl, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6(const uint8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_m(vbool8_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e16ff.c index f16e5572088796..66d06b24f27467 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7(const _Float16 *base, size_t *new_vl // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7(const _Float16 *base, size_t *new_vl // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7(const 
_Float16 *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7(const int16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7(const int16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16m1x7_t test_vlseg7e16ff_v_i16m1x7(const int16_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) 
poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7(const uint16_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7(const uint16_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7(const uint16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint16mf4x7_t 
test_vlseg7e16ff_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_m(vbool16_t mask, const int16_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e32ff.c index 64711d8bb6cb24..864b784f96fb90 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7(const float *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7(const float *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7(const int32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x7_t test_vlseg7e32ff_v_i32m1x7(const int32_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7(const uint32_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7(const uint32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 
0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_m(vbool32_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_m(vbool32_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32mf2x7_t 
test_vlseg7e32ff_v_u32mf2x7_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e64ff.c index 68e845110c4ecc..6e8994cc89cf83 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7(const double *base, size_t *new_vl, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x7_t 
test_vlseg7e64ff_v_i64m1x7(const int64_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7(const uint64_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_m(vbool64_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e8ff.c index 66102fe6482e04..c04347259deaff 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x7_t test_vlseg7e8ff_v_i8m1x7(const int8_t *base, size_t *new_vl, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7(const uint8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, siz // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_m(vbool8_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", 
, 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e16ff.c index 3db74ec9b6326d..7ca95d7f58d734 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8(const _Float16 *base, size_t *new_vl // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8(const _Float16 *base, size_t *new_vl // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr 
[[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8(const _Float16 *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8(const int16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8(const int16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } 
[[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16m1x8_t test_vlseg8e16ff_v_i16m1x8(const int16_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8(const uint16_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8(const uint16_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8(const uint16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], 
i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_m(vbool16_t mask, const int16_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) 
poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e32ff.c index 3d63354ac2573a..0c83da1d00cc33 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 
8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8(const float *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8(const float *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8(const int32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x8_t 
test_vlseg8e32ff_v_i32m1x8(const int32_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8(const uint32_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8(const uint32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_m(vbool32_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_m(vbool32_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e64ff.c index dff16c7e095374..b2253ef0f47b4c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8(const double *base, size_t *new_vl, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr 
[[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x8_t test_vlseg8e64ff_v_i64m1x8(const int64_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8(const uint64_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_m(vbool64_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e8ff.c index efebd4d789de6b..46b4f6ab6b5794 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x8_t test_vlseg8e8ff_v_i8m1x8(const int8_t *base, size_t *new_vl, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8(const uint8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), 
i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_m(vbool8_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 
8 @@ -205,7 +205,7 @@ vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei16.c index cdebec226b9d59..0dad625a30ddb0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
2) @test_vluxseg2ei16_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2(const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2(const _Float16 *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2(const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2(const _Float16 *base, vuint16m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t 
test_vluxseg2ei16_v_f32mf2x2(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2(const float *base, vuint16m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2(const float *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2(const float *base, vuint16m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2(const float *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2(const double *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2(const double *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2(const double *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei16_v_i8m1x2(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vluxseg2ei16_v_i8m1x2(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei16_v_i8m2x2(const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vluxseg2ei16_v_i8m2x2(const int8_t *base, vuint16m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei16_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei16_v_i8m4x2(const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vluxseg2ei16_v_i8m4x2(const int8_t *base, vuint16m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t 
test_vluxseg2ei16_v_i16m1x2(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vluxseg2ei16_v_i16m1x2(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei16_v_i16m2x2(const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vluxseg2ei16_v_i16m2x2(const int16_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei16_v_i16m4x2(const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vluxseg2ei16_v_i16m4x2(const int16_t *base, vuint16m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei16_v_i32m1x2(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vluxseg2ei16_v_i32m1x2(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei16_v_i32m2x2(const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vluxseg2ei16_v_i32m2x2(const int32_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei16_v_i32m4x2(const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vluxseg2ei16_v_i32m4x2(const int32_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei16_v_i64m1x2(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vluxseg2ei16_v_i64m1x2(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei16_v_i64m2x2(const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vluxseg2ei16_v_i64m2x2(const int64_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei16_v_i64m4x2(const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vluxseg2ei16_v_i64m4x2(const int64_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
2) @test_vluxseg2ei16_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2(const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2(const uint8_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t 
test_vluxseg2ei16_v_u8m4x2(const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2(const uint8_t *base, vuint16m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2(const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2(const uint16_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2(const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2(const uint16_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2(const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2(const uint32_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2(const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2(const uint32_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2(const uint64_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2(const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2(const uint64_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_m(vbool4_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ 
vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_m(vbool8_t mask, const float *base, vuint16m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_m(vbool8_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_m(vbool16_t mask, const double *base, vuint16m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_m(vbool4_t 
mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_m(vbool8_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_m(vbool16_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t 
test_vluxseg2ei16_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 
6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_m(vbool32_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei32.c index 10f44bbbe21068..fd3b01ec7113e7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t 
test_vluxseg2ei32_v_f16mf2x2(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2(const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2(const _Float16 *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2(const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2(const _Float16 *base, vuint32m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2(const float *base, vuint32m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2(const float *base, vuint32m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2(const float *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2(const float *base, vuint32m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2(const float *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2(const double *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2(const double *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2(const double *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2(const double *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei32_v_i8m1x2(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vluxseg2ei32_v_i8m1x2(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei32_v_i8m2x2(const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vluxseg2ei32_v_i8m2x2(const int8_t 
*base, vuint32m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei32_v_i16m1x2(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16m1x2_t test_vluxseg2ei32_v_i16m1x2(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei32_v_i16m2x2(const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m2x2_t test_vluxseg2ei32_v_i16m2x2(const int16_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei32_v_i16m4x2(const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m4x2_t test_vluxseg2ei32_v_i16m4x2(const int16_t *base, vuint32m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei32_v_i32m1x2(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32m1x2_t test_vluxseg2ei32_v_i32m1x2(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei32_v_i32m2x2(const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m2x2_t test_vluxseg2ei32_v_i32m2x2(const int32_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei32_v_i32m4x2(const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m4x2_t test_vluxseg2ei32_v_i32m4x2(const int32_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei32_v_i64m1x2(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint64m1x2_t test_vluxseg2ei32_v_i64m1x2(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei32_v_i64m2x2(const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m2x2_t test_vluxseg2ei32_v_i64m2x2(const int64_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei32_v_i64m4x2(const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m4x2_t test_vluxseg2ei32_v_i64m4x2(const int64_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2(const uint8_t *base, vuint32m2_t 
binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2(const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2(const uint8_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2(const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2(const uint16_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2(const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2(const uint16_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2(const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2(const uint32_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2(const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2(const uint32_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2(const uint64_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2(const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2(const uint64_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, 
size_t vl) { @@ -480,7 +480,7 @@ vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_m(vbool4_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, 
i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_m(vbool8_t mask, const float *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_m(vbool8_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_m(vbool16_t mask, const double *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_m(vbool64_t mask, 
const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_m(vbool4_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_m(vbool16_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint16mf4x2_t 
test_vluxseg2ei32_v_u16mf4x2_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, 
i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_m(vbool32_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei64.c index 8d9c86a0918ac6..d21cf31a268832 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2(const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2(const _Float16 *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2(const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2(const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei64_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2(const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2(const float *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2(const float *base, vuint64m8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2(const float *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2(const double *base, vuint64m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t 
test_vluxseg2ei64_v_f64m2x2(const double *base, vuint64m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2(const double *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2(const double *base, vuint64m4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2(const double *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei64_v_i8m1x2(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8m1x2_t test_vluxseg2ei64_v_i8m1x2(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei64_v_i16m1x2(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m1x2_t test_vluxseg2ei64_v_i16m1x2(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei64_v_i16m2x2(const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16m2x2_t test_vluxseg2ei64_v_i16m2x2(const int16_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei64_v_i32m1x2(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m1x2_t test_vluxseg2ei64_v_i32m1x2(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei64_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei64_v_i32m2x2(const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint32m2x2_t test_vluxseg2ei64_v_i32m2x2(const int32_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei64_v_i32m4x2(const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32m4x2_t test_vluxseg2ei64_v_i32m4x2(const int32_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei64_v_i64m1x2(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint64m1x2_t test_vluxseg2ei64_v_i64m1x2(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t 
test_vluxseg2ei64_v_i64m2x2(const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint64m2x2_t test_vluxseg2ei64_v_i64m2x2(const int64_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei64_v_i64m4x2(const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint64m4x2_t test_vluxseg2ei64_v_i64m4x2(const int64_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2(const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2(const uint16_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2(const uint32_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2(const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2(const uint32_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // 
vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2(const uint64_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2(const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2(const uint64_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_m(vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_m(vbool8_t mask, const float *base, vuint64m8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_m(vbool8_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_m(vbool16_t mask, const double *base, vuint64m4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_m(vbool64_t mask, 
const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_m(vbool8_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_m(vbool16_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16m1x2_t 
test_vluxseg2ei64_v_u16m1x2_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_m(vbool32_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei8.c index a02c5e19eafd0e..ee07437d56e737 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2(const 
_Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2(const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2(const _Float16 *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2(const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2(const _Float16 *base, vuint8m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: 
ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2(const float *base, vuint8mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2(const float *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2(const float *base, vuint8m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2(const float *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2(const double *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2(const double *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2(const double *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2(const double *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei8_v_i8m1x2(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vluxseg2ei8_v_i8m1x2(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei8_v_i8m2x2(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vluxseg2ei8_v_i8m2x2(const int8_t *base, vuint8m2_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei8_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei8_v_i8m4x2(const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vluxseg2ei8_v_i8m4x2(const int8_t *base, vuint8m4_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei8_v_i16m1x2(const int16_t 
*base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vluxseg2ei8_v_i16m1x2(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei8_v_i16m2x2(const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vluxseg2ei8_v_i16m2x2(const int16_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei8_v_i16m4x2(const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vluxseg2ei8_v_i16m4x2(const int16_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei8_v_i32m1x2(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vluxseg2ei8_v_i32m1x2(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei8_v_i32m2x2(const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vluxseg2ei8_v_i32m2x2(const int32_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei8_v_i32m4x2(const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vluxseg2ei8_v_i32m4x2(const int32_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei8_v_i64m1x2(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vluxseg2ei8_v_i64m1x2(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei8_v_i64m2x2(const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vluxseg2ei8_v_i64m2x2(const int64_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei8_v_i64m4x2(const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vluxseg2ei8_v_i64m4x2(const int64_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei8_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2(const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2(const uint8_t *base, vuint8m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2(const uint8_t *base, 
vuint8m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2(const uint8_t *base, vuint8m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2(const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2(const uint16_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2(const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2(const uint16_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2(const uint32_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2(const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2(const uint32_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2(const uint64_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei8_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2(const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2(const uint64_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_m(vbool64_t 
mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_m(vbool16_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_m(vbool8_t mask, const float *base, vuint8m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_m(vbool8_t mask, const float *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_m(vbool16_t mask, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_m(vbool64_t mask, 
const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], 
i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_m(vbool16_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_m(vbool32_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_m(vbool16_t mask, const int64_t 
*base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_m(vbool64_t mask, const 
uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei16.c index 3b47631d46b85b..f1e0292efb164e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3(const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3(const _Float16 *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei16_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3(const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3(const float *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3(const double *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t 
test_vluxseg3ei16_v_i8mf8x3(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei16_v_i8m1x3(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei16_v_i8m1x3(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei16_v_i8m2x3(const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vluxseg3ei16_v_i8m2x3(const int8_t *base, vuint16m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei16_v_i16m1x3(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vluxseg3ei16_v_i16m1x3(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei16_v_i16m2x3(const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vluxseg3ei16_v_i16m2x3(const int16_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei16_v_i32m1x3(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vluxseg3ei16_v_i32m1x3(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei16_v_i32m2x3(const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vluxseg3ei16_v_i32m2x3(const int32_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
3) @test_vluxseg3ei16_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei16_v_i64m1x3(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vluxseg3ei16_v_i64m1x3(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei16_v_i64m2x3(const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vluxseg3ei16_v_i64m2x3(const int64_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t 
test_vluxseg3ei16_v_u8mf4x3(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3(const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3(const uint8_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3(const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3(const uint16_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3(const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3(const uint32_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3(const uint64_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t 
test_vluxseg3ei16_v_f32m1x3_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_m(vbool32_t 
mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei32.c index 7f7ee7b151e34d..691a4137515279 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3(const _Float16 *base, 
vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3(const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3(const _Float16 *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3(const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3(const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3(const float *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3(const double *base, vuint32m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3(const double *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei32_v_i8m1x3(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei32_v_i8m1x3(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei32_v_i8m2x3(const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vluxseg3ei32_v_i8m2x3(const int8_t *base, vuint32m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei32_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei32_v_i16m1x3(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vluxseg3ei32_v_i16m1x3(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t 
test_vluxseg3ei32_v_i16m2x3(const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vluxseg3ei32_v_i16m2x3(const int16_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei32_v_i32m1x3(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vluxseg3ei32_v_i32m1x3(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei32_v_i32m2x3(const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vluxseg3ei32_v_i32m2x3(const int32_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei32_v_i64m1x3(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vluxseg3ei32_v_i64m1x3(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei32_v_i64m2x3(const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vluxseg3ei32_v_i64m2x3(const int64_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3(const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3(const uint8_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei32_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3(const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3(const uint16_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t 
test_vluxseg3ei32_v_u32mf2x3(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3(const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3(const uint32_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3(const uint64_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_m(vbool16_t mask, const _Float16 *bas // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_m(vbool64_t 
mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m2x3_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t 
test_vluxseg3ei32_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
//
vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
@@ -720,7 +720,7 @@ vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_m(vbool32_t mask, const uint32_t *base
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m2x3_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
//
vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
@@ -730,7 +730,7 @@ vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_m(vbool16_t mask, const uint32_t *base
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m1x3_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
//
vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -740,7 +740,7 @@ vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_m(vbool64_t mask, const uint64_t *base
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m2x3_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
//
vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei64.c
index 8a63ede1adcf3e..2b6f10ddd13100 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei64.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf4x3
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
//
vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3(const _Float16 *base, vuint64m1_t bindex, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3(const _Float16 *base, vuint64m1_t b
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf2x3
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
//
vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3(const _Float16 *base, vuint64m2_t bindex, size_t vl) {
@@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3(const _Float16 *base, vuint64m2_t b
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m1x3
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
//
vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3(const _Float16 *base, vuint64m4_t bindex, size_t vl) {
@@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3(const _Float16 *base, vuint64m4_t bin
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m2x3
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3(const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3(const _Float16 *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3(const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3(const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3(const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3(const float *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3(const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3(const double *base, vuint64m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3(const double *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei64_v_i8m1x3(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei64_v_i8m1x3(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // 
vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei64_v_i16m1x3(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x3_t test_vluxseg3ei64_v_i16m1x3(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei64_v_i16m2x3(const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x3_t test_vluxseg3ei64_v_i16m2x3(const int16_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei64_v_i32m1x3(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32m1x3_t test_vluxseg3ei64_v_i32m1x3(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei64_v_i32m2x3(const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x3_t test_vluxseg3ei64_v_i32m2x3(const int32_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei64_v_i64m1x3(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x3_t test_vluxseg3ei64_v_i64m1x3(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei64_v_i64m2x3(const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x3_t test_vluxseg3ei64_v_i64m2x3(const int64_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] 
// vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3(const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3(const uint16_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3(const uint32_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3(const uint64_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
3) @test_vluxseg3ei64_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_m(vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { @@ 
-450,7 +450,7 @@ vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_m(vbool8_t mask, 
const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 
5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei8.c index 3abeb0593f6cc6..6f48b3e4d07719 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3(const _Float16 *base, 
vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3(const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3(const _Float16 *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3(const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3(const float *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3(const double *base, vuint8mf4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3(const double *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m1x3 
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei8_v_i8m1x3(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei8_v_i8m1x3(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei8_v_i8m2x3(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vluxseg3ei8_v_i8m2x3(const int8_t *base, vuint8m2_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3(const int16_t *base, vuint8mf4_t bindex, size_t 
vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei8_v_i16m1x3(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vluxseg3ei8_v_i16m1x3(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei8_v_i16m2x3(const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vluxseg3ei8_v_i16m2x3(const int16_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) 
poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei8_v_i32m1x3(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vluxseg3ei8_v_i32m1x3(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei8_v_i32m2x3(const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vluxseg3ei8_v_i32m2x3(const int32_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei8_v_i64m1x3(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vluxseg3ei8_v_i64m1x3(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei8_v_i64m2x3(const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vluxseg3ei8_v_i64m2x3(const int64_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3(const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3(const uint8_t *base, vuint8m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3(const uint16_t *base, 
vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3(const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3(const uint16_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3(const uint32_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3(const uint64_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_m(vbool16_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_m(vbool64_t mask, const int16_t 
*base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_m(vbool16_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_m(vbool64_t mask, const 
int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_m(vbool32_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 
3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei16.c index 26a309e2af78f0..47cb7275b9acb9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", 
, 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4(const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4(const _Float16 *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4(const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4(const float *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4(const double *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei16_v_i8m1x4(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei16_v_i8m1x4(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei16_v_i8m2x4(const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vluxseg4ei16_v_i8m2x4(const int8_t *base, vuint16m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // 
vint16m1x4_t test_vluxseg4ei16_v_i16m1x4(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vluxseg4ei16_v_i16m1x4(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei16_v_i16m2x4(const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vluxseg4ei16_v_i16m2x4(const int16_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei16_v_i32m1x4(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vluxseg4ei16_v_i32m1x4(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei16_v_i32m2x4(const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vluxseg4ei16_v_i32m2x4(const int32_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei16_v_i64m1x4(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vluxseg4ei16_v_i64m1x4(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei16_v_i64m2x4(const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vluxseg4ei16_v_i64m2x4(const int64_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4(const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4(const uint8_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] 
// vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4(const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4(const uint16_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4(const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4(const uint32_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4(const uint64_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_m(vbool32_t mask, const double 
*base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m1x4_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t 
test_vluxseg4ei16_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei32.c index b00c95f63bdb40..fc0351ec70d0eb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4(const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4(const _Float16 *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4(const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei32_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4(const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4(const float *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4(const double *base, vuint32m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4(const double *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t 
test_vluxseg4ei32_v_i8mf8x4(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei32_v_i8m1x4(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei32_v_i8m1x4(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei32_v_i8m2x4(const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vluxseg4ei32_v_i8m2x4(const int8_t *base, vuint32m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei32_v_i16m1x4(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vluxseg4ei32_v_i16m1x4(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei32_v_i16m2x4(const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vluxseg4ei32_v_i16m2x4(const int16_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei32_v_i32m1x4(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vluxseg4ei32_v_i32m1x4(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei32_v_i32m2x4(const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vluxseg4ei32_v_i32m2x4(const int32_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
4) @test_vluxseg4ei32_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei32_v_i64m1x4(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vluxseg4ei32_v_i64m1x4(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei32_v_i64m2x4(const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vluxseg4ei32_v_i64m2x4(const int64_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t 
test_vluxseg4ei32_v_u8mf4x4(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4(const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4(const uint8_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4(const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4(const uint16_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4(const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4(const uint32_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4(const uint64_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t 
test_vluxseg4ei32_v_f32m1x4_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_m(vbool32_t mask, 
const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei64.c index 49c05bb5619b37..25edf108b58d83 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4(const _Float16 *base, 
vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4(const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4(const _Float16 *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4(const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4(const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4(const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4(const float *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4(const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4(const double *base, vuint64m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4(const double *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei64_v_i8m1x4(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei64_v_i8m1x4(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei64_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei64_v_i16m1x4(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x4_t test_vluxseg4ei64_v_i16m1x4(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei64_v_i16m2x4(const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x4_t test_vluxseg4ei64_v_i16m2x4(const int16_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t 
test_vluxseg4ei64_v_i32mf2x4(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei64_v_i32m1x4(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32m1x4_t test_vluxseg4ei64_v_i32m1x4(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei64_v_i32m2x4(const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x4_t test_vluxseg4ei64_v_i32m2x4(const int32_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei64_v_i64m1x4(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x4_t test_vluxseg4ei64_v_i64m1x4(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei64_v_i64m2x4(const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x4_t test_vluxseg4ei64_v_i64m2x4(const int64_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
4) @test_vluxseg4ei64_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4(const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4(const uint16_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t 
test_vluxseg4ei64_v_u32m2x4(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4(const uint32_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4(const uint64_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t 
test_vluxseg4ei64_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_m(vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 
4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei64_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 
+580,7 @@ vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei8.c index cd37135698b078..91f8613fefc459 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4(const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4(const _Float16 *base, 
vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4(const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4(const float *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4(const double *base, vuint8mf4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4(const double *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei8_v_i8m1x4(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei8_v_i8m1x4(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei8_v_i8m2x4(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vluxseg4ei8_v_i8m2x4(const int8_t *base, vuint8m2_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei8_v_i16m1x4(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vluxseg4ei8_v_i16m1x4(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei8_v_i16m2x4(const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vluxseg4ei8_v_i16m2x4(const int16_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei8_v_i32m1x4(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vluxseg4ei8_v_i32m1x4(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei8_v_i32m2x4(const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vluxseg4ei8_v_i32m2x4(const int32_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei8_v_i64m1x4(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vluxseg4ei8_v_i64m1x4(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t 
test_vluxseg4ei8_v_i64m2x4(const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vluxseg4ei8_v_i64m2x4(const int64_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4(const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4(const uint8_t *base, vuint8m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4(const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4(const uint16_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei8_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4(const uint32_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4(const uint64_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_m(vbool16_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_m(vbool32_t mask, const int8_t 
*base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_m(vbool8_t mask, const int16_t *base, 
vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_m(vbool16_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_m(vbool32_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: 
ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_m(vbool16_t mask, const 
uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei16.c index 105b6af9ba7e8f..2292a895252f56 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5(const float *base, vuint16mf2_t bindex, size_t vl) { @@ 
-60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) 
poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei16_v_i8m1x5(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei16_v_i8m1x5(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) 
poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei16_v_i16m1x5(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei16_v_i16m1x5(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei16_v_i32m1x5(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei16_v_i32m1x5(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei16_v_i64m1x5(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei16_v_i64m1x5(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t 
test_vluxseg5ei16_v_u8m1x5(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf2x5_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t 
test_vluxseg5ei16_v_i32m1x5_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei32.c index bc5fad7190a884..59d9d1d16edaed 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vluxseg5ei32_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei32_v_i8m1x5(const int8_t 
*base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei32_v_i8m1x5(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei32_v_i16m1x5(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei32_v_i16m1x5(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei32_v_i32m1x5(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei32_v_i32m1x5(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei32_v_i64m1x5(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei32_v_i64m1x5(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vluxseg5ei32_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t 
test_vluxseg5ei32_v_u32m1x5(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_m(vbool64_t mask, 
const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei64.c index 21d245fe45282a..8734285678df89 
100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei64_v_i8m1x5(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei64_v_i8m1x5(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei64_v_i16m1x5(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei64_v_i16m1x5(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] 
// vint32m1x5_t test_vluxseg5ei64_v_i32m1x5(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei64_v_i32m1x5(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei64_v_i64m1x5(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei64_v_i64m1x5(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_m(vbool64_t mask, const 
int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf4x5_m // CHECK-RV64-SAME: 
( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ 
vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei8.c index d52b6301a94790..cd73cdf76df473 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vluxseg5ei8_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5(const float 
*base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei8_v_i8m1x5(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei8_v_i8m1x5(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei8_v_i16m1x5(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei8_v_i16m1x5(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei8_v_i32m1x5(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei8_v_i32m1x5(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei8_v_i64m1x5(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei8_v_i64m1x5(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vluxseg5ei8_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5(const uint8_t *base, 
vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, 
vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_m(vbool32_t mask, const int32_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: 
ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei16.c index ac0caeea5d0c56..2f1b6843ba9e8e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei16_v_i8m1x6(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei16_v_i8m1x6(const int8_t *base, 
vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei16_v_i16m1x6(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei16_v_i16m1x6(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei16_v_i32m1x6(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei16_v_i32m1x6(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei16_v_i64m1x6(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei16_v_i64m1x6(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6(const uint32_t *base, 
vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { 
@@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t 
test_vluxseg6ei16_v_u16m1x6_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei32.c index ab475ae2b55c06..819d8328eca1d6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei32.c @@ -10,7 +10,7 
@@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei32_v_i8m1x6(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei32_v_i8m1x6(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei32_v_i16m1x6(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei32_v_i16m1x6(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei32_v_i32m1x6(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei32_v_i32m1x6(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei32_v_i64m1x6(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei32_v_i64m1x6(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) 
[[TMP0]] // vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_m(vbool64_t 
mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t 
test_vluxseg6ei32_v_u32mf2x6_m(vbool64_t mask, const uint32_t *ba
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32m1x6_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]]
//
vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
@@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_m(vbool32_t mask, const uint32_t *base
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u64m1x6_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]]
//
vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei64.c
index f50227ec8ecfdb..1be7dc4a50cd05 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei64.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf4x6
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]]
//
vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6(const _Float16 *base, vuint64m1_t bindex, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6(const _Float16 *base, vuint64m1_t b
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6)
@test_vluxseg6ei64_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t 
test_vluxseg6ei64_v_f32m1x6(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei64_v_i8m1x6(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei64_v_i8m1x6(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei64_v_i16m1x6(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei64_v_i16m1x6(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei64_v_i32m1x6(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei64_v_i32m1x6(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei64_v_i64m1x6(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei64_v_i64m1x6(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vluxseg6ei64_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t 
test_vluxseg6ei64_v_u8m1x6(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_m(vbool32_t mask, 
const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", 
, 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t 
test_vluxseg6ei64_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", 
, 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vluxseg6ei64_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei8.c index efa2d311f24d0c..ec9b1b9dc15bee 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vluxseg6ei8_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei8_v_i8m1x6(const int8_t *base, vuint8m1_t 
bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei8_v_i8m1x6(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei8_v_i16m1x6(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei8_v_i16m1x6(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei8_v_i32m1x6(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei8_v_i32m1x6(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei8_v_i64m1x6(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei8_v_i64m1x6(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vluxseg6ei8_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6(const 
uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_m(vbool64_t mask, const int64_t 
*base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_m(vbool16_t mask, const 
uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei16.c index 2bbfa0cb5a6e38..092b0b5999c59c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei16.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei16_v_i8m1x7(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei16_v_i8m1x7(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vluxseg7ei16_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei16_v_i16m1x7(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei16_v_i16m1x7(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t 
test_vluxseg7ei16_v_i32m1x7(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei16_v_i32m1x7(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei16_v_i64m1x7(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei16_v_i64m1x7(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_m(vbool64_t mask, const 
int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf4x7_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ 
vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei32.c index 3e5ef9436e96b1..a47d057326dacc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
7) @test_vluxseg7ei32_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t 
test_vluxseg7ei32_v_f32m1x7(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei32_v_i8m1x7(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei32_v_i8m1x7(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei32_v_i16m1x7(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei32_v_i16m1x7(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei32_v_i32m1x7(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei32_v_i32m1x7(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei32_v_i64m1x7(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei32_v_i64m1x7(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vluxseg7ei32_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t 
test_vluxseg7ei32_v_u8m1x7(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_m(vbool32_t mask, 
const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t 
test_vluxseg7ei32_v_u32m1x7_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei64.c index 8eec673bc7f171..40e749bf60545d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t 
test_vluxseg7ei64_v_f64m1x7(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei64_v_i8m1x7(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei64_v_i8m1x7(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei64_v_i16m1x7(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei64_v_i16m1x7(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei64_v_i32m1x7(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei64_v_i32m1x7(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei64_v_i64m1x7(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei64_v_i64m1x7(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7(const uint16_t 
*base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vluxseg7ei64_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 
+430,7 @@ vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei8.c index b0e042424cb8e0..a813871a82ab3b 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei8_v_i8m1x7(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei8_v_i8m1x7(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
7) @test_vluxseg7ei8_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei8_v_i16m1x7(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei8_v_i16m1x7(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei8_v_i32m1x7(const 
int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei8_v_i32m1x7(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei8_v_i64m1x7(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei8_v_i64m1x7(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vluxseg7ei8_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ 
vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_m(vbool64_t 
mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei16.c index 55efc4f24ae098..f67c4fdfbec237 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8(const float *base, vuint16mf2_t bindex, size_t vl) { @@ 
-60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) 
poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei16_v_i8m1x8(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei16_v_i8m1x8(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) 
poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei16_v_i16m1x8(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei16_v_i16m1x8(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei16_v_i32m1x8(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei16_v_i32m1x8(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei16_v_i64m1x8(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei16_v_i64m1x8(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t 
test_vluxseg8ei16_v_u8m1x8(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf2x8_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t 
test_vluxseg8ei16_v_i32m1x8_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei32.c index 72acb61383bf8e..5b8da945b322fd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vluxseg8ei32_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei32_v_i8m1x8(const int8_t 
*base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei32_v_i8m1x8(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei32_v_i16m1x8(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei32_v_i16m1x8(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei32_v_i32m1x8(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei32_v_i32m1x8(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei32_v_i64m1x8(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei32_v_i64m1x8(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vluxseg8ei32_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t 
test_vluxseg8ei32_v_u32m1x8(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_m(vbool64_t mask, 
const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei64.c index ee90edda7a3bab..0c18081ffb8798 
100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei64_v_i8m1x8(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei64_v_i8m1x8(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei64_v_i16m1x8(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei64_v_i16m1x8(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] 
// vint32m1x8_t test_vluxseg8ei64_v_i32m1x8(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei64_v_i32m1x8(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei64_v_i64m1x8(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei64_v_i64m1x8(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_m(vbool64_t mask, const 
int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf4x8_m // CHECK-RV64-SAME: 
( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ 
vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei8.c index ed44f7b08a0eac..e0e2618b94d186 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vluxseg8ei8_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8(const float 
*base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei8_v_i8m1x8(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei8_v_i8m1x8(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei8_v_i16m1x8(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei8_v_i16m1x8(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei8_v_i32m1x8(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei8_v_i32m1x8(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei8_v_i64m1x8(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei8_v_i64m1x8(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vluxseg8ei8_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8(const uint8_t *base, 
vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, 
vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_m(vbool32_t mask, const int32_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: 
ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei16.c index 144959a762e011..629936f76b74cc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16mf4x2(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg2ei16_v_f16mf4x2(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16mf2x2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg2ei16_v_f16mf2x2(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16m1x2(_Float16 *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg2ei16_v_f16m1x2(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16m2x2(_Float16 *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg2ei16_v_f16m2x2(_Float16 *base, vuint16m2_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16m4x2(_Float16 *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg2ei16_v_f16m4x2(_Float16 *base, vuint16m4_t bindex, vfloat16m4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32mf2x2(float *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg2ei16_v_f32mf2x2(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m1x2(float *base, 
vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg2ei16_v_f32m1x2(float *base, vuint16mf2_t bindex, vfloat32m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m2x2(float *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg2ei16_v_f32m2x2(float *base, vuint16m1_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m4x2(float *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg2ei16_v_f32m4x2(float *base, vuint16m2_t bindex, vfloat32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m1x2(double *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg2ei16_v_f64m1x2(double *base, vuint16mf4_t bindex, vfloat64m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m2x2(double *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void 
test_vsoxseg2ei16_v_f64m2x2(double *base, vuint16mf2_t bindex, vfloat64m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m4x2(double *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg2ei16_v_f64m4x2(double *base, vuint16m1_t bindex, vfloat64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8mf8x2(int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg2ei16_v_i8mf8x2(int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8mf4x2(int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg2ei16_v_i8mf4x2(int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8mf2x2(int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg2ei16_v_i8mf2x2(int8_t *base, vuint16m1_t bindex, vint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg2ei16_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m1x2(int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg2ei16_v_i8m1x2(int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m2x2(int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg2ei16_v_i8m2x2(int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m4x2(int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg2ei16_v_i8m4x2(int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16mf4x2(int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg2ei16_v_i16mf4x2(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", 
, 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16mf2x2(int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg2ei16_v_i16mf2x2(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m1x2(int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg2ei16_v_i16m1x2(int16_t *base, vuint16m1_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m2x2(int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg2ei16_v_i16m2x2(int16_t *base, vuint16m2_t bindex, vint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m4x2(int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg2ei16_v_i16m4x2(int16_t *base, vuint16m4_t bindex, vint16m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32mf2x2(int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg2ei16_v_i32mf2x2(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m1x2(int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg2ei16_v_i32m1x2(int32_t *base, vuint16mf2_t bindex, vint32m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m2x2(int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg2ei16_v_i32m2x2(int32_t *base, vuint16m1_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m4x2(int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg2ei16_v_i32m4x2(int32_t *base, vuint16m2_t bindex, vint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i64m1x2(int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg2ei16_v_i64m1x2(int64_t *base, vuint16mf4_t bindex, vint64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i64m2x2(int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg2ei16_v_i64m2x2(int64_t *base, vuint16mf2_t bindex, vint64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i64m4x2(int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg2ei16_v_i64m4x2(int64_t *base, vuint16m1_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf8x2(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg2ei16_v_u8mf8x2(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf4x2(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg2ei16_v_u8mf4x2(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf2x2(uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg2ei16_v_u8mf2x2(uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8m1x2(uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg2ei16_v_u8m1x2(uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8m2x2(uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg2ei16_v_u8m2x2(uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8m4x2(uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg2ei16_v_u8m4x2(uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16mf4x2(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg2ei16_v_u16mf4x2(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16mf2x2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg2ei16_v_u16mf2x2(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16m1x2(uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg2ei16_v_u16m1x2(uint16_t *base, vuint16m1_t bindex, vuint16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg2ei16_v_u16m2x2(uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg2ei16_v_u16m2x2(uint16_t *base, vuint16m2_t bindex, vuint16m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16m4x2(uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg2ei16_v_u16m4x2(uint16_t *base, vuint16m4_t bindex, vuint16m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32mf2x2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg2ei16_v_u32mf2x2(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32m1x2(uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg2ei16_v_u32m1x2(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32m2x2(uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ 
-450,7 +450,7 @@ void test_vsoxseg2ei16_v_u32m2x2(uint32_t *base, vuint16m1_t bindex, vuint32m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32m4x2(uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg2ei16_v_u32m4x2(uint32_t *base, vuint16m2_t bindex, vuint32m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u64m1x2(uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg2ei16_v_u64m1x2(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u64m2x2(uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg2ei16_v_u64m2x2(uint64_t *base, vuint16mf2_t bindex, vuint64m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u64m4x2(uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg2ei16_v_u64m4x2(uint64_t *base, vuint16m1_t bindex, vuint64m4x2 // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg2ei16_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg2ei16_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg2ei16_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg2ei16_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg2ei16_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg2ei16_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint16m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32mf2x2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg2ei16_v_f32mf2x2_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m1x2_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg2ei16_v_f32m1x2_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m2x2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg2ei16_v_f32m2x2_m(vbool16_t mask, float *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m4x2_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg2ei16_v_f32m4x2_m(vbool8_t mask, float *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m1x2_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg2ei16_v_f64m1x2_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m2x2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg2ei16_v_f64m2x2_m(vbool32_t mask, double *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m4x2_m(vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg2ei16_v_f64m4x2_m(vbool16_t mask, double *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg2ei16_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg2ei16_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg2ei16_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m1x2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg2ei16_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg2ei16_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint16m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg2ei16_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint16m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) { 
@@ -680,7 +680,7 @@ void test_vsoxseg2ei16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg2ei16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg2ei16_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg2ei16_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg2ei16_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint16m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg2ei16_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg2ei16_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsoxseg2ei16_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsoxseg2ei16_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsoxseg2ei16_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsoxseg2ei16_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsoxseg2ei16_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsoxseg2ei16_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsoxseg2ei16_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsoxseg2ei16_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -830,7 +830,7 @@ void test_vsoxseg2ei16_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -840,7 +840,7 @@ void test_vsoxseg2ei16_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -850,7 +850,7 @@ void test_vsoxseg2ei16_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint16m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -860,7 +860,7 @@ void test_vsoxseg2ei16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg2ei16_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -870,7 +870,7 @@ void test_vsoxseg2ei16_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -880,7 +880,7 @@ void test_vsoxseg2ei16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -890,7 +890,7 @@ void test_vsoxseg2ei16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -900,7 +900,7 @@ void test_vsoxseg2ei16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint16m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -910,7 +910,7 @@ void test_vsoxseg2ei16_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -920,7 +920,7 @@ void test_vsoxseg2ei16_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -930,7 +930,7 @@ void test_vsoxseg2ei16_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -940,7 +940,7 @@ void test_vsoxseg2ei16_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -950,7 +950,7 @@ void test_vsoxseg2ei16_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -960,7 +960,7 @@ void test_vsoxseg2ei16_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei32.c index 366f83faf555c9..e340289164a7d5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // 
void test_vsoxseg2ei32_v_f16mf4x2(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg2ei32_v_f16mf4x2(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16mf2x2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg2ei32_v_f16mf2x2(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16m1x2(_Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg2ei32_v_f16m1x2(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16m2x2(_Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg2ei32_v_f16m2x2(_Float16 *base, vuint32m4_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16m4x2(_Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { 
@@ -60,7 +60,7 @@ void test_vsoxseg2ei32_v_f16m4x2(_Float16 *base, vuint32m8_t bindex, vfloat16m4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32mf2x2(float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg2ei32_v_f32mf2x2(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m1x2(float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg2ei32_v_f32m1x2(float *base, vuint32m1_t bindex, vfloat32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m2x2(float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg2ei32_v_f32m2x2(float *base, vuint32m2_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m4x2(float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg2ei32_v_f32m4x2(float *base, vuint32m4_t bindex, vfloat32m4x2_t // CHECK-RV64-LABEL: 
define dso_local void @test_vsoxseg2ei32_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f64m1x2(double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg2ei32_v_f64m1x2(double *base, vuint32mf2_t bindex, vfloat64m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f64m2x2(double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg2ei32_v_f64m2x2(double *base, vuint32m1_t bindex, vfloat64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f64m4x2(double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg2ei32_v_f64m4x2(double *base, vuint32m2_t bindex, vfloat64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf8x2(int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg2ei32_v_i8mf8x2(int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf4x2(int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg2ei32_v_i8mf4x2(int8_t *base, vuint32m1_t bindex, vint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf2x2(int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg2ei32_v_i8mf2x2(int8_t *base, vuint32m2_t bindex, vint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8m1x2(int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg2ei32_v_i8m1x2(int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8m2x2(int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg2ei32_v_i8m2x2(int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16mf4x2(int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg2ei32_v_i16mf4x2(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16mf2x2(int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg2ei32_v_i16mf2x2(int16_t *base, vuint32m1_t bindex, vint16mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m1x2(int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg2ei32_v_i16m1x2(int16_t *base, vuint32m2_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m2x2(int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg2ei32_v_i16m2x2(int16_t *base, vuint32m4_t bindex, vint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m4x2(int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg2ei32_v_i16m4x2(int16_t *base, vuint32m8_t bindex, vint16m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32mf2x2(int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg2ei32_v_i32mf2x2(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32m1x2(int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg2ei32_v_i32m1x2(int32_t *base, vuint32m1_t bindex, vint32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32m2x2(int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg2ei32_v_i32m2x2(int32_t *base, vuint32m2_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32m4x2(int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg2ei32_v_i32m4x2(int32_t *base, vuint32m4_t bindex, vint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m1x2(int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg2ei32_v_i64m1x2(int64_t *base, vuint32mf2_t bindex, vint64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m2x2(int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg2ei32_v_i64m2x2(int64_t *base, vuint32m1_t bindex, vint64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m4x2(int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg2ei32_v_i64m4x2(int64_t *base, vuint32m2_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf8x2(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg2ei32_v_u8mf8x2(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf4x2(uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg2ei32_v_u8mf4x2(uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf2x2(uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg2ei32_v_u8mf2x2(uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8m1x2(uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg2ei32_v_u8m1x2(uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8m2x2(uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg2ei32_v_u8m2x2(uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16mf4x2(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg2ei32_v_u16mf4x2(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16mf2x2(uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg2ei32_v_u16mf2x2(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16m1x2(uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg2ei32_v_u16m1x2(uint16_t *base, vuint32m2_t bindex, vuint16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg2ei32_v_u16m2x2(uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg2ei32_v_u16m2x2(uint16_t *base, vuint32m4_t bindex, vuint16m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16m4x2(uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg2ei32_v_u16m4x2(uint16_t *base, vuint32m8_t bindex, vuint16m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32mf2x2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg2ei32_v_u32mf2x2(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m1x2(uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg2ei32_v_u32m1x2(uint32_t *base, vuint32m1_t bindex, vuint32m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m2x2(uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ 
-430,7 +430,7 @@ void test_vsoxseg2ei32_v_u32m2x2(uint32_t *base, vuint32m2_t bindex, vuint32m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m4x2(uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg2ei32_v_u32m4x2(uint32_t *base, vuint32m4_t bindex, vuint32m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u64m1x2(uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg2ei32_v_u64m1x2(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u64m2x2(uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg2ei32_v_u64m2x2(uint64_t *base, vuint32m1_t bindex, vuint64m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u64m4x2(uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg2ei32_v_u64m4x2(uint64_t *base, vuint32m2_t bindex, vuint64m4x2 // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg2ei32_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg2ei32_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg2ei32_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg2ei32_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg2ei32_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg2ei32_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint32m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32mf2x2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg2ei32_v_f32mf2x2_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m1x2_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg2ei32_v_f32m1x2_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m2x2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg2ei32_v_f32m2x2_m(vbool16_t mask, float *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m4x2_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg2ei32_v_f32m4x2_m(vbool8_t mask, float *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f64m1x2_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg2ei32_v_f64m1x2_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f64m2x2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg2ei32_v_f64m2x2_m(vbool32_t mask, double *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f64m4x2_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg2ei32_v_f64m4x2_m(vbool16_t mask, double *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg2ei32_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg2ei32_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg2ei32_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m1x2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg2ei32_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg2ei32_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint32m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg2ei32_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { 
@@ -660,7 +660,7 @@ void test_vsoxseg2ei32_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg2ei32_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg2ei32_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg2ei32_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint32m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg2ei32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg2ei32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg2ei32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg2ei32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg2ei32_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsoxseg2ei32_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsoxseg2ei32_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsoxseg2ei32_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsoxseg2ei32_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsoxseg2ei32_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsoxseg2ei32_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsoxseg2ei32_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg2ei32_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsoxseg2ei32_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -830,7 +830,7 @@ void test_vsoxseg2ei32_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -840,7 +840,7 @@ void test_vsoxseg2ei32_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg2ei32_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -850,7 +850,7 @@ void test_vsoxseg2ei32_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -860,7 +860,7 @@ void test_vsoxseg2ei32_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint32m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -870,7 +870,7 @@ void test_vsoxseg2ei32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -880,7 +880,7 @@ void test_vsoxseg2ei32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -890,7 +890,7 @@ void test_vsoxseg2ei32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -900,7 +900,7 @@ void test_vsoxseg2ei32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -910,7 +910,7 @@ void test_vsoxseg2ei32_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -920,7 +920,7 @@ void test_vsoxseg2ei32_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei64.c index f0f07a1c994105..e89048b1a2f907 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16mf4x2(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg2ei64_v_f16mf4x2(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16mf2x2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg2ei64_v_f16mf2x2(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16m1x2(_Float16 *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void 
test_vsoxseg2ei64_v_f16m1x2(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16m2x2(_Float16 *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg2ei64_v_f16m2x2(_Float16 *base, vuint64m8_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32mf2x2(float *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg2ei64_v_f32mf2x2(float *base, vuint64m1_t bindex, vfloat32mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m1x2(float *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg2ei64_v_f32m1x2(float *base, vuint64m2_t bindex, vfloat32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m2x2(float *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg2ei64_v_f32m2x2(float *base, vuint64m4_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg2ei64_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m4x2(float *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg2ei64_v_f32m4x2(float *base, vuint64m8_t bindex, vfloat32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m1x2(double *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg2ei64_v_f64m1x2(double *base, vuint64m1_t bindex, vfloat64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m2x2(double *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg2ei64_v_f64m2x2(double *base, vuint64m2_t bindex, vfloat64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m4x2(double *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg2ei64_v_f64m4x2(double *base, vuint64m4_t bindex, vfloat64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf8x2(int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg2ei64_v_i8mf8x2(int8_t *base, vuint64m1_t bindex, vint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf4x2(int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg2ei64_v_i8mf4x2(int8_t *base, vuint64m2_t bindex, vint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf2x2(int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg2ei64_v_i8mf2x2(int8_t *base, vuint64m4_t bindex, vint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8m1x2(int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg2ei64_v_i8m1x2(int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16mf4x2(int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg2ei64_v_i16mf4x2(int16_t *base, vuint64m1_t bindex, vint16mf4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16mf2x2(int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg2ei64_v_i16mf2x2(int16_t *base, vuint64m2_t bindex, vint16mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16m1x2(int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg2ei64_v_i16m1x2(int16_t *base, vuint64m4_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16m2x2(int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg2ei64_v_i16m2x2(int16_t *base, vuint64m8_t bindex, vint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32mf2x2(int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg2ei64_v_i32mf2x2(int32_t *base, vuint64m1_t bindex, vint32mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32m1x2(int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg2ei64_v_i32m1x2(int32_t *base, vuint64m2_t bindex, vint32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32m2x2(int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg2ei64_v_i32m2x2(int32_t *base, vuint64m4_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32m4x2(int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg2ei64_v_i32m4x2(int32_t *base, vuint64m8_t bindex, vint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m1x2(int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg2ei64_v_i64m1x2(int64_t *base, vuint64m1_t bindex, vint64m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m2x2(int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg2ei64_v_i64m2x2(int64_t *base, vuint64m2_t bindex, vint64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m4x2(int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg2ei64_v_i64m4x2(int64_t *base, vuint64m4_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf8x2(uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg2ei64_v_u8mf8x2(uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf4x2(uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg2ei64_v_u8mf4x2(uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf2x2(uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg2ei64_v_u8mf2x2(uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8m1x2(uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg2ei64_v_u8m1x2(uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16mf4x2(uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg2ei64_v_u16mf4x2(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16mf2x2(uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg2ei64_v_u16mf2x2(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16m1x2(uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg2ei64_v_u16m1x2(uint16_t *base, vuint64m4_t bindex, vuint16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16m2x2(uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg2ei64_v_u16m2x2(uint16_t *base, vuint64m8_t bindex, vuint16m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32mf2x2(uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg2ei64_v_u32mf2x2(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg2ei64_v_u32m1x2(uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg2ei64_v_u32m1x2(uint32_t *base, vuint64m2_t bindex, vuint32m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32m2x2(uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg2ei64_v_u32m2x2(uint32_t *base, vuint64m4_t bindex, vuint32m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32m4x2(uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg2ei64_v_u32m4x2(uint32_t *base, vuint64m8_t bindex, vuint32m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u64m1x2(uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg2ei64_v_u64m1x2(uint64_t *base, vuint64m1_t bindex, vuint64m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u64m2x2(uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ 
-410,7 +410,7 @@ void test_vsoxseg2ei64_v_u64m2x2(uint64_t *base, vuint64m2_t bindex, vuint64m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u64m4x2(uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg2ei64_v_u64m4x2(uint64_t *base, vuint64m4_t bindex, vuint64m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg2ei64_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg2ei64_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret 
void // void test_vsoxseg2ei64_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg2ei64_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg2ei64_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32mf2x2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg2ei64_v_f32mf2x2_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m1x2_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg2ei64_v_f32m1x2_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m2x2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg2ei64_v_f32m2x2_m(vbool16_t mask, float *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m4x2_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg2ei64_v_f32m4x2_m(vbool8_t mask, float *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m1x2_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg2ei64_v_f64m1x2_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m2x2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg2ei64_v_f64m2x2_m(vbool32_t mask, double *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m4x2_m(vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg2ei64_v_f64m4x2_m(vbool16_t mask, double *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg2ei64_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg2ei64_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg2ei64_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8m1x2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg2ei64_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg2ei64_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg2ei64_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) { 
@@ -600,7 +600,7 @@ void test_vsoxseg2ei64_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg2ei64_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg2ei64_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg2ei64_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg2ei64_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg2ei64_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg2ei64_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg2ei64_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg2ei64_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg2ei64_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg2ei64_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg2ei64_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg2ei64_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg2ei64_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg2ei64_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsoxseg2ei64_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg2ei64_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsoxseg2ei64_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsoxseg2ei64_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsoxseg2ei64_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32m2x2_m(vbool16_t 
mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsoxseg2ei64_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsoxseg2ei64_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsoxseg2ei64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsoxseg2ei64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei8.c index 5dbbc384179c86..28f42913287d2a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16mf4x2(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg2ei8_v_f16mf4x2(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16mf2x2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg2ei8_v_f16mf2x2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m1x2(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg2ei8_v_f16m1x2(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) 
[[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m2x2(_Float16 *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg2ei8_v_f16m2x2(_Float16 *base, vuint8m1_t bindex, vfloat16m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m4x2(_Float16 *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg2ei8_v_f16m4x2(_Float16 *base, vuint8m2_t bindex, vfloat16m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32mf2x2(float *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg2ei8_v_f32mf2x2(float *base, vuint8mf8_t bindex, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32m1x2(float *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg2ei8_v_f32m1x2(float *base, vuint8mf4_t bindex, vfloat32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32m2x2(float *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg2ei8_v_f32m2x2(float *base, vuint8mf2_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32m4x2(float *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg2ei8_v_f32m4x2(float *base, vuint8m1_t bindex, vfloat32m4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m1x2(double *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg2ei8_v_f64m1x2(double *base, vuint8mf8_t bindex, vfloat64m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m2x2(double *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg2ei8_v_f64m2x2(double *base, vuint8mf4_t bindex, vfloat64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m4x2(double *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg2ei8_v_f64m4x2(double *base, vuint8mf2_t bindex, vfloat64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf8x2(int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg2ei8_v_i8mf8x2(int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf4x2(int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg2ei8_v_i8mf4x2(int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf2x2(int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg2ei8_v_i8mf2x2(int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8m1x2(int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg2ei8_v_i8m1x2(int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8m2x2(int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg2ei8_v_i8m2x2(int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8m4x2(int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg2ei8_v_i8m4x2(int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16mf4x2(int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg2ei8_v_i16mf4x2(int16_t *base, vuint8mf8_t bindex, vint16mf4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16mf2x2(int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg2ei8_v_i16mf2x2(int16_t *base, vuint8mf4_t bindex, vint16mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16m1x2(int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg2ei8_v_i16m1x2(int16_t *base, vuint8mf2_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16m2x2(int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg2ei8_v_i16m2x2(int16_t *base, vuint8m1_t bindex, vint16m2x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16m4x2(int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg2ei8_v_i16m4x2(int16_t *base, vuint8m2_t bindex, vint16m4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32mf2x2(int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t 
v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg2ei8_v_i32mf2x2(int32_t *base, vuint8mf8_t bindex, vint32mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m1x2(int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg2ei8_v_i32m1x2(int32_t *base, vuint8mf4_t bindex, vint32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m2x2(int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg2ei8_v_i32m2x2(int32_t *base, vuint8mf2_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m4x2(int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg2ei8_v_i32m4x2(int32_t *base, vuint8m1_t bindex, vint32m4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m1x2(int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg2ei8_v_i64m1x2(int64_t *base, vuint8mf8_t bindex, vint64m1x2_t // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m2x2(int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg2ei8_v_i64m2x2(int64_t *base, vuint8mf4_t bindex, vint64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m4x2(int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg2ei8_v_i64m4x2(int64_t *base, vuint8mf2_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf8x2(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg2ei8_v_u8mf8x2(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf4x2(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg2ei8_v_u8mf4x2(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf2x2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg2ei8_v_u8mf2x2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m1x2(uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg2ei8_v_u8m1x2(uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m2x2(uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg2ei8_v_u8m2x2(uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m4x2(uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg2ei8_v_u8m4x2(uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16mf4x2(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg2ei8_v_u16mf4x2(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16mf2x2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg2ei8_v_u16mf2x2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m1x2(uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg2ei8_v_u16m1x2(uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m2x2(uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg2ei8_v_u16m2x2(uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m4x2(uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg2ei8_v_u16m4x2(uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32mf2x2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg2ei8_v_u32mf2x2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m1x2(uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg2ei8_v_u32m1x2(uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m2x2(uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg2ei8_v_u32m2x2(uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m4x2(uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg2ei8_v_u32m4x2(uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u64m1x2(uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg2ei8_v_u64m1x2(uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u64m2x2(uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg2ei8_v_u64m2x2(uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u64m4x2(uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg2ei8_v_u64m4x2(uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg2ei8_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg2ei8_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg2ei8_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg2ei8_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg2ei8_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint8m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32mf2x2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg2ei8_v_f32mf2x2_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32m1x2_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg2ei8_v_f32m1x2_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32m2x2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg2ei8_v_f32m2x2_m(vbool16_t mask, float *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m4x2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32m4x2_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg2ei8_v_f32m4x2_m(vbool8_t mask, float *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m1x2_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg2ei8_v_f64m1x2_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m2x2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg2ei8_v_f64m2x2_m(vbool32_t mask, double *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m4x2_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -610,7 
+610,7 @@ void test_vsoxseg2ei8_v_f64m4x2_m(vbool16_t mask, double *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg2ei8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg2ei8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg2ei8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg2ei8_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg2ei8_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg2ei8_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg2ei8_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg2ei8_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg2ei8_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg2ei8_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg2ei8_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint8m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg2ei8_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg2ei8_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsoxseg2ei8_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsoxseg2ei8_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsoxseg2ei8_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsoxseg2ei8_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsoxseg2ei8_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void 
test_vsoxseg2ei8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsoxseg2ei8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsoxseg2ei8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -830,7 +830,7 @@ void test_vsoxseg2ei8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 
3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -840,7 +840,7 @@ void test_vsoxseg2ei8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -850,7 +850,7 @@ void test_vsoxseg2ei8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -860,7 +860,7 @@ void test_vsoxseg2ei8_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -870,7 +870,7 @@ void test_vsoxseg2ei8_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -880,7 +880,7 @@ void test_vsoxseg2ei8_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -890,7 +890,7 @@ void test_vsoxseg2ei8_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -900,7 +900,7 @@ void test_vsoxseg2ei8_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint8m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -910,7 +910,7 @@ void test_vsoxseg2ei8_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -920,7 +920,7 @@ void test_vsoxseg2ei8_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -930,7 +930,7 @@ void test_vsoxseg2ei8_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -940,7 +940,7 @@ void test_vsoxseg2ei8_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -950,7 +950,7 @@ void test_vsoxseg2ei8_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m2x2_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -960,7 +960,7 @@ void test_vsoxseg2ei8_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei16.c index 4aed99b8d3b860..785016338859f8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16mf4x3(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg3ei16_v_f16mf4x3(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16mf2x3(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg3ei16_v_f16mf2x3(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16m1x3(_Float16 *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg3ei16_v_f16m1x3(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16m2x3(_Float16 *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg3ei16_v_f16m2x3(_Float16 *base, vuint16m2_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32mf2x3(float *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg3ei16_v_f32mf2x3(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32m1x3(float *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg3ei16_v_f32m1x3(float *base, vuint16mf2_t bindex, vfloat32m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32m2x3(float *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg3ei16_v_f32m2x3(float *base, vuint16m1_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f64m1x3(double *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg3ei16_v_f64m1x3(double *base, vuint16mf4_t bindex, vfloat64m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f64m2x3(double *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg3ei16_v_f64m2x3(double *base, vuint16mf2_t bindex, vfloat64m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8mf8x3(int8_t 
*base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg3ei16_v_i8mf8x3(int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8mf4x3(int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg3ei16_v_i8mf4x3(int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8mf2x3(int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg3ei16_v_i8mf2x3(int8_t *base, vuint16m1_t bindex, vint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8m1x3(int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg3ei16_v_i8m1x3(int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8m2x3(int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg3ei16_v_i8m2x3(int8_t *base, 
vuint16m4_t bindex, vint8m2x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16mf4x3(int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg3ei16_v_i16mf4x3(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16mf2x3(int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg3ei16_v_i16mf2x3(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16m1x3(int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg3ei16_v_i16m1x3(int16_t *base, vuint16m1_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16m2x3(int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg3ei16_v_i16m2x3(int16_t *base, vuint16m2_t bindex, vint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32mf2x3 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32mf2x3(int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg3ei16_v_i32mf2x3(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32m1x3(int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg3ei16_v_i32m1x3(int32_t *base, vuint16mf2_t bindex, vint32m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32m2x3(int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg3ei16_v_i32m2x3(int32_t *base, vuint16m1_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i64m1x3(int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg3ei16_v_i64m1x3(int64_t *base, vuint16mf4_t bindex, vint64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i64m2x3(int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg3ei16_v_i64m2x3(int64_t *base, vuint16mf2_t bindex, vint64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8mf8x3(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg3ei16_v_u8mf8x3(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8mf4x3(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg3ei16_v_u8mf4x3(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8mf2x3(uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg3ei16_v_u8mf2x3(uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8m1x3(uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg3ei16_v_u8m1x3(uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8m2x3(uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg3ei16_v_u8m2x3(uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16mf4x3(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg3ei16_v_u16mf4x3(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16mf2x3(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg3ei16_v_u16mf2x3(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16m1x3(uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg3ei16_v_u16m1x3(uint16_t *base, vuint16m1_t bindex, vuint16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16m2x3(uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg3ei16_v_u16m2x3(uint16_t *base, vuint16m2_t bindex, vuint16m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u32mf2x3(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg3ei16_v_u32mf2x3(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u32m1x3(uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg3ei16_v_u32m1x3(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u32m2x3(uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg3ei16_v_u32m2x3(uint32_t *base, vuint16m1_t bindex, vuint32m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u64m1x3(uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg3ei16_v_u64m1x3(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u64m2x3(uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg3ei16_v_u64m2x3(uint64_t *base, vuint16mf2_t bindex, vuint64m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg3ei16_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg3ei16_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg3ei16_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg3ei16_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32mf2x3_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg3ei16_v_f32mf2x3_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32m1x3_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg3ei16_v_f32m1x3_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32m2x3_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg3ei16_v_f32m2x3_m(vbool16_t mask, float *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f64m1x3_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg3ei16_v_f64m1x3_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f64m2x3_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg3ei16_v_f64m2x3_m(vbool32_t mask, double *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg3ei16_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg3ei16_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg3ei16_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg3ei16_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8m1x3_m(vbool8_t mask, int8_t *base, 
vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg3ei16_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg3ei16_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint16m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg3ei16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg3ei16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg3ei16_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg3ei16_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg3ei16_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg3ei16_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg3ei16_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg3ei16_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg3ei16_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg3ei16_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf4x3_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg3ei16_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg3ei16_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg3ei16_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) { 
@@ -660,7 +660,7 @@ void test_vsoxseg3ei16_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg3ei16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg3ei16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg3ei16_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg3ei16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg3ei16_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg3ei16_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg3ei16_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg3ei16_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei32.c index 51c98fc77b7e13..f55b084595130d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16mf4x3(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg3ei32_v_f16mf4x3(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16mf2x3(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg3ei32_v_f16mf2x3(_Float16 *base, vuint32m1_t 
bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16m1x3(_Float16 *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg3ei32_v_f16m1x3(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16m2x3(_Float16 *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg3ei32_v_f16m2x3(_Float16 *base, vuint32m4_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f32mf2x3(float *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg3ei32_v_f32mf2x3(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f32m1x3(float *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg3ei32_v_f32m1x3(float *base, vuint32m1_t bindex, vfloat32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m2x3 // CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f32m2x3(float *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg3ei32_v_f32m2x3(float *base, vuint32m2_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f64m1x3(double *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg3ei32_v_f64m1x3(double *base, vuint32mf2_t bindex, vfloat64m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f64m2x3(double *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg3ei32_v_f64m2x3(double *base, vuint32m1_t bindex, vfloat64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf8x3(int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg3ei32_v_i8mf8x3(int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf4x3(int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg3ei32_v_i8mf4x3(int8_t *base, vuint32m1_t bindex, vint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf2x3(int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg3ei32_v_i8mf2x3(int8_t *base, vuint32m2_t bindex, vint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8m1x3(int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg3ei32_v_i8m1x3(int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8m2x3(int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg3ei32_v_i8m2x3(int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16mf4x3(int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg3ei32_v_i16mf4x3(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16mf2x3(int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg3ei32_v_i16mf2x3(int16_t *base, vuint32m1_t bindex, vint16mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16m1x3(int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg3ei32_v_i16m1x3(int16_t *base, vuint32m2_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16m2x3(int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg3ei32_v_i16m2x3(int16_t *base, vuint32m4_t bindex, vint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32mf2x3(int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg3ei32_v_i32mf2x3(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32m1x3(int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg3ei32_v_i32m1x3(int32_t *base, vuint32m1_t bindex, vint32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32m2x3(int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg3ei32_v_i32m2x3(int32_t *base, vuint32m2_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i64m1x3(int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg3ei32_v_i64m1x3(int64_t *base, vuint32mf2_t bindex, vint64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i64m2x3(int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg3ei32_v_i64m2x3(int64_t *base, vuint32m1_t bindex, vint64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf8x3(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg3ei32_v_u8mf8x3(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf4x3(uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg3ei32_v_u8mf4x3(uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf2x3(uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg3ei32_v_u8mf2x3(uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8m1x3(uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg3ei32_v_u8m1x3(uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8m2x3(uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg3ei32_v_u8m2x3(uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16mf4x3(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg3ei32_v_u16mf4x3(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16mf2x3(uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg3ei32_v_u16mf2x3(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg3ei32_v_u16m1x3(uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg3ei32_v_u16m1x3(uint16_t *base, vuint32m2_t bindex, vuint16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16m2x3(uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg3ei32_v_u16m2x3(uint16_t *base, vuint32m4_t bindex, vuint16m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32mf2x3(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg3ei32_v_u32mf2x3(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32m1x3(uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg3ei32_v_u32m1x3(uint32_t *base, vuint32m1_t bindex, vuint32m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32m2x3(uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ 
-360,7 +360,7 @@ void test_vsoxseg3ei32_v_u32m2x3(uint32_t *base, vuint32m2_t bindex, vuint32m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u64m1x3(uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg3ei32_v_u64m1x3(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u64m2x3(uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg3ei32_v_u64m2x3(uint64_t *base, vuint32m1_t bindex, vuint64m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg3ei32_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, 
vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg3ei32_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg3ei32_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg3ei32_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f32mf2x3_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg3ei32_v_f32mf2x3_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f32m1x3_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg3ei32_v_f32m1x3_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f32m2x3_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg3ei32_v_f32m2x3_m(vbool16_t mask, float *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f64m1x3_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg3ei32_v_f64m1x3_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f64m2x3_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg3ei32_v_f64m2x3_m(vbool32_t mask, double *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg3ei32_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg3ei32_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg3ei32_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg3ei32_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg3ei32_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint32m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg3ei32_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg3ei32_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -550,7 
+550,7 @@ void test_vsoxseg3ei32_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg3ei32_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg3ei32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg3ei32_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg3ei32_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg3ei32_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg3ei32_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg3ei32_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg3ei32_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg3ei32_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg3ei32_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg3ei32_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg3ei32_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg3ei32_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg3ei32_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg3ei32_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg3ei32_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg3ei32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg3ei32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg3ei32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u64m1x3_m(vbool64_t 
mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg3ei32_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei64.c index d636899c2a18c4..cf1f4214c17881 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16mf4x3(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg3ei64_v_f16mf4x3(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16mf2x3(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg3ei64_v_f16mf2x3(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16m1x3(_Float16 *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg3ei64_v_f16m1x3(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16m2x3(_Float16 *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg3ei64_v_f16m2x3(_Float16 *base, vuint64m8_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f32mf2x3(float *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg3ei64_v_f32mf2x3(float *base, vuint64m1_t bindex, vfloat32mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f32m1x3(float *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg3ei64_v_f32m1x3(float *base, vuint64m2_t bindex, vfloat32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f32m2x3(float *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg3ei64_v_f32m2x3(float *base, vuint64m4_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f64m1x3(double *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg3ei64_v_f64m1x3(double *base, vuint64m1_t bindex, vfloat64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f64m2x3(double *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg3ei64_v_f64m2x3(double *base, vuint64m2_t bindex, vfloat64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf8x3(int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg3ei64_v_i8mf8x3(int8_t *base, vuint64m1_t bindex, vint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf4x3(int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg3ei64_v_i8mf4x3(int8_t *base, vuint64m2_t bindex, vint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf2x3(int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg3ei64_v_i8mf2x3(int8_t *base, vuint64m4_t bindex, vint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8m1x3(int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg3ei64_v_i8m1x3(int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16mf4x3(int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg3ei64_v_i16mf4x3(int16_t *base, vuint64m1_t bindex, vint16mf4x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16mf2x3(int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg3ei64_v_i16mf2x3(int16_t *base, vuint64m2_t bindex, vint16mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16m1x3(int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg3ei64_v_i16m1x3(int16_t *base, vuint64m4_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16m2x3(int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg3ei64_v_i16m2x3(int16_t *base, vuint64m8_t bindex, vint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32mf2x3(int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg3ei64_v_i32mf2x3(int32_t *base, vuint64m1_t bindex, vint32mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32m1x3(int32_t *base, 
vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg3ei64_v_i32m1x3(int32_t *base, vuint64m2_t bindex, vint32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32m2x3(int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg3ei64_v_i32m2x3(int32_t *base, vuint64m4_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i64m1x3(int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg3ei64_v_i64m1x3(int64_t *base, vuint64m1_t bindex, vint64m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i64m2x3(int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg3ei64_v_i64m2x3(int64_t *base, vuint64m2_t bindex, vint64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf8x3(uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg3ei64_v_u8mf8x3(uint8_t 
*base, vuint64m1_t bindex, vuint8mf8x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf4x3(uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg3ei64_v_u8mf4x3(uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf2x3(uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg3ei64_v_u8mf2x3(uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8m1x3(uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg3ei64_v_u8m1x3(uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16mf4x3(uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg3ei64_v_u16mf4x3(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf2x3 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16mf2x3(uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg3ei64_v_u16mf2x3(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16m1x3(uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg3ei64_v_u16m1x3(uint16_t *base, vuint64m4_t bindex, vuint16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16m2x3(uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg3ei64_v_u16m2x3(uint16_t *base, vuint64m8_t bindex, vuint16m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u32mf2x3(uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg3ei64_v_u32mf2x3(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) 
[[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u32m1x3(uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg3ei64_v_u32m1x3(uint32_t *base, vuint64m2_t bindex, vuint32m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u32m2x3(uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg3ei64_v_u32m2x3(uint32_t *base, vuint64m4_t bindex, vuint32m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u64m1x3(uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg3ei64_v_u64m1x3(uint64_t *base, vuint64m1_t bindex, vuint64m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u64m2x3(uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg3ei64_v_u64m2x3(uint64_t *base, vuint64m2_t bindex, vuint64m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg3ei64_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg3ei64_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg3ei64_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg3ei64_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg3ei64_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f32mf2x3_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg3ei64_v_f32mf2x3_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f32m1x3_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg3ei64_v_f32m1x3_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f32m2x3_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg3ei64_v_f32m2x3_m(vbool16_t mask, float *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f64m1x3_m(vbool64_t mask, double *base, 
vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg3ei64_v_f64m1x3_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f64m2x3_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg3ei64_v_f64m2x3_m(vbool32_t mask, double *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg3ei64_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg3ei64_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg3ei64_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg3ei64_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg3ei64_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg3ei64_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg3ei64_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg3ei64_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg3ei64_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg3ei64_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m2x3_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg3ei64_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg3ei64_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg3ei64_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) 
{ @@ -590,7 +590,7 @@ void test_vsoxseg3ei64_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg3ei64_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg3ei64_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg3ei64_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg3ei64_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg3ei64_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg3ei64_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg3ei64_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg3ei64_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg3ei64_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg3ei64_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg3ei64_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei8.c index 151a4f1aa98d80..e2c29e6a98a5a0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16mf4x3(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg3ei8_v_f16mf4x3(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16mf2x3(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg3ei8_v_f16mf2x3(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16m1x3(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x3_t 
v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg3ei8_v_f16m1x3(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16m2x3(_Float16 *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg3ei8_v_f16m2x3(_Float16 *base, vuint8m1_t bindex, vfloat16m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32mf2x3(float *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg3ei8_v_f32mf2x3(float *base, vuint8mf8_t bindex, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32m1x3(float *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg3ei8_v_f32m1x3(float *base, vuint8mf4_t bindex, vfloat32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32m2x3(float *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg3ei8_v_f32m2x3(float *base, vuint8mf2_t bindex, vfloat32m2x3_t // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f64m1x3(double *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg3ei8_v_f64m1x3(double *base, vuint8mf8_t bindex, vfloat64m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f64m2x3(double *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg3ei8_v_f64m2x3(double *base, vuint8mf4_t bindex, vfloat64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf8x3(int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg3ei8_v_i8mf8x3(int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf4x3(int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg3ei8_v_i8mf4x3(int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf2x3(int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg3ei8_v_i8mf2x3(int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8m1x3(int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg3ei8_v_i8m1x3(int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8m2x3(int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg3ei8_v_i8m2x3(int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16mf4x3(int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg3ei8_v_i16mf4x3(int16_t *base, vuint8mf8_t bindex, vint16mf4x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16mf2x3(int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg3ei8_v_i16mf2x3(int16_t *base, vuint8mf4_t bindex, vint16mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16m1x3(int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg3ei8_v_i16m1x3(int16_t *base, vuint8mf2_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16m2x3(int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg3ei8_v_i16m2x3(int16_t *base, vuint8m1_t bindex, vint16m2x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32mf2x3(int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg3ei8_v_i32mf2x3(int32_t *base, vuint8mf8_t bindex, vint32mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32m1x3(int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg3ei8_v_i32m1x3(int32_t *base, vuint8mf4_t bindex, vint32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32m2x3(int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg3ei8_v_i32m2x3(int32_t *base, vuint8mf2_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i64m1x3(int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg3ei8_v_i64m1x3(int64_t *base, vuint8mf8_t bindex, vint64m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i64m2x3(int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg3ei8_v_i64m2x3(int64_t *base, vuint8mf4_t bindex, vint64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf8x3(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg3ei8_v_u8mf8x3(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf4x3(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg3ei8_v_u8mf4x3(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf2x3(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg3ei8_v_u8mf2x3(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8m1x3(uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg3ei8_v_u8m1x3(uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8m2x3(uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg3ei8_v_u8m2x3(uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16mf4x3(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg3ei8_v_u16mf4x3(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16mf2x3(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg3ei8_v_u16mf2x3(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16m1x3(uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg3ei8_v_u16m1x3(uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16m2x3(uint16_t *base, vuint8m1_t bindex, 
vuint16m2x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg3ei8_v_u16m2x3(uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u32mf2x3(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg3ei8_v_u32mf2x3(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u32m1x3(uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg3ei8_v_u32m1x3(uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u32m2x3(uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg3ei8_v_u32m2x3(uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u64m1x3(uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg3ei8_v_u64m1x3(uint64_t *base, vuint8mf8_t bindex, 
vuint64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u64m2x3(uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg3ei8_v_u64m2x3(uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg3ei8_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg3ei8_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, 
vfloat16m1x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg3ei8_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg3ei8_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32mf2x3_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg3ei8_v_f32mf2x3_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32m1x3_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg3ei8_v_f32m1x3_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 
3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32m2x3_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg3ei8_v_f32m2x3_m(vbool16_t mask, float *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f64m1x3_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg3ei8_v_f64m1x3_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f64m2x3_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg3ei8_v_f64m2x3_m(vbool32_t mask, double *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg3ei8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg3ei8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg3ei8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg3ei8_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg3ei8_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg3ei8_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg3ei8_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg3ei8_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg3ei8_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg3ei8_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg3ei8_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg3ei8_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg3ei8_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint8mf8_t 
bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg3ei8_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg3ei8_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg3ei8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg3ei8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", 
, 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg3ei8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg3ei8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg3ei8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg3ei8_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg3ei8_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg3ei8_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg3ei8_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg3ei8_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5)
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) {
@@ -720,7 +720,7 @@ void test_vsoxseg3ei8_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi
// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m2x3_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5)
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) {
@@ -730,7 +730,7 @@ void test_vsoxseg3ei8_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bi
// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m1x3_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6)
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) {
@@ -740,7 +740,7 @@ void test_vsoxseg3ei8_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bi
// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m2x3_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6)
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg3ei8_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei16.c
index 4166a34282d7d2..9467938a037f51 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei16.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf4x4
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_f16mf4x4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
@@ -20,7 +20,7 @@ void test_vsoxseg4ei16_v_f16mf4x4(_Float16 *base, vuint16mf4_t bindex, vfloat16m
// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf2x4
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_f16mf2x4(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
@@ -30,7 +30,7 @@ void test_vsoxseg4ei16_v_f16mf2x4(_Float16 *base, vuint16mf2_t bindex, vfloat16m
// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m1x4
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg4ei16_v_f16m1x4(_Float16 *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) {
@@ -40,7 +40,7 @@ void test_vsoxseg4ei16_v_f16m1x4(_Float16 *base, vuint16m1_t bindex, vfloat16m1x
// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m2x4
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]],
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16m2x4(_Float16 *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg4ei16_v_f16m2x4(_Float16 *base, vuint16m2_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f32mf2x4(float *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg4ei16_v_f32mf2x4(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f32m1x4(float *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg4ei16_v_f32m1x4(float *base, vuint16mf2_t bindex, vfloat32m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f32m2x4(float *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg4ei16_v_f32m2x4(float *base, vuint16m1_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f64m1x4(double *base, 
vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg4ei16_v_f64m1x4(double *base, vuint16mf4_t bindex, vfloat64m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f64m2x4(double *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg4ei16_v_f64m2x4(double *base, vuint16mf2_t bindex, vfloat64m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf8x4(int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg4ei16_v_i8mf8x4(int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf4x4(int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg4ei16_v_i8mf4x4(int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf2x4(int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg4ei16_v_i8mf2x4(int8_t 
*base, vuint16m1_t bindex, vint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8m1x4(int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg4ei16_v_i8m1x4(int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8m2x4(int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg4ei16_v_i8m2x4(int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16mf4x4(int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg4ei16_v_i16mf4x4(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16mf2x4(int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg4ei16_v_i16mf2x4(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m1x4 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16m1x4(int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg4ei16_v_i16m1x4(int16_t *base, vuint16m1_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16m2x4(int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg4ei16_v_i16m2x4(int16_t *base, vuint16m2_t bindex, vint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32mf2x4(int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg4ei16_v_i32mf2x4(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32m1x4(int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg4ei16_v_i32m1x4(int32_t *base, vuint16mf2_t bindex, vint32m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32m2x4(int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg4ei16_v_i32m2x4(int32_t *base, vuint16m1_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i64m1x4(int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg4ei16_v_i64m1x4(int64_t *base, vuint16mf4_t bindex, vint64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i64m2x4(int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg4ei16_v_i64m2x4(int64_t *base, vuint16mf2_t bindex, vint64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf8x4(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg4ei16_v_u8mf8x4(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf4x4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg4ei16_v_u8mf4x4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf2x4(uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg4ei16_v_u8mf2x4(uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8m1x4(uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg4ei16_v_u8m1x4(uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8m2x4(uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg4ei16_v_u8m2x4(uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16mf4x4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg4ei16_v_u16mf4x4(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16mf2x4(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg4ei16_v_u16mf2x4(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16m1x4(uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg4ei16_v_u16m1x4(uint16_t *base, vuint16m1_t bindex, vuint16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16m2x4(uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg4ei16_v_u16m2x4(uint16_t *base, vuint16m2_t bindex, vuint16m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32mf2x4(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg4ei16_v_u32mf2x4(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32m1x4(uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg4ei16_v_u32m1x4(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32m2x4(uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg4ei16_v_u32m2x4(uint32_t *base, vuint16m1_t bindex, vuint32m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u64m1x4(uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg4ei16_v_u64m1x4(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u64m2x4(uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg4ei16_v_u64m2x4(uint64_t *base, vuint16mf2_t bindex, vuint64m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg4ei16_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg4ei16_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg4ei16_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg4ei16_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f32mf2x4_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg4ei16_v_f32mf2x4_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f32m1x4_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg4ei16_v_f32m1x4_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f32m2x4_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg4ei16_v_f32m2x4_m(vbool16_t mask, float *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f64m1x4_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg4ei16_v_f64m1x4_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f64m2x4_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg4ei16_v_f64m2x4_m(vbool32_t mask, double *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg4ei16_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg4ei16_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg4ei16_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg4ei16_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg4ei16_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint16m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg4ei16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg4ei16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg4ei16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg4ei16_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg4ei16_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg4ei16_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg4ei16_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg4ei16_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg4ei16_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg4ei16_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg4ei16_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg4ei16_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg4ei16_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m1x4_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg4ei16_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg4ei16_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg4ei16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, 
size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg4ei16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg4ei16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg4ei16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg4ei16_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg4ei16_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg4ei16_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg4ei16_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei32.c index 22567e546c5160..8bbb9f86b07903 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local 
void @test_vsoxseg4ei32_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16mf4x4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg4ei32_v_f16mf4x4(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16mf2x4(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg4ei32_v_f16mf2x4(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16m1x4(_Float16 *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg4ei32_v_f16m1x4(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16m2x4(_Float16 *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg4ei32_v_f16m2x4(_Float16 *base, vuint32m4_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f32mf2x4(float *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg4ei32_v_f32mf2x4(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f32m1x4(float *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg4ei32_v_f32m1x4(float *base, vuint32m1_t bindex, vfloat32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f32m2x4(float *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg4ei32_v_f32m2x4(float *base, vuint32m2_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f64m1x4(double *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg4ei32_v_f64m1x4(double *base, vuint32mf2_t bindex, vfloat64m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f64m2x4(double *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg4ei32_v_f64m2x4(double *base, vuint32m1_t bindex, vfloat64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf8x4(int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg4ei32_v_i8mf8x4(int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf4x4(int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg4ei32_v_i8mf4x4(int8_t *base, vuint32m1_t bindex, vint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf2x4(int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg4ei32_v_i8mf2x4(int8_t *base, vuint32m2_t bindex, vint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8m1x4(int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg4ei32_v_i8m1x4(int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8m2x4(int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg4ei32_v_i8m2x4(int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16mf4x4(int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg4ei32_v_i16mf4x4(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16mf2x4(int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg4ei32_v_i16mf2x4(int16_t *base, vuint32m1_t bindex, vint16mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16m1x4(int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg4ei32_v_i16m1x4(int16_t *base, vuint32m2_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16m2x4(int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg4ei32_v_i16m2x4(int16_t *base, vuint32m4_t bindex, vint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32mf2x4(int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg4ei32_v_i32mf2x4(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32m1x4(int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg4ei32_v_i32m1x4(int32_t *base, vuint32m1_t bindex, vint32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32m2x4(int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg4ei32_v_i32m2x4(int32_t *base, vuint32m2_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i64m1x4(int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg4ei32_v_i64m1x4(int64_t *base, vuint32mf2_t bindex, vint64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i64m2x4(int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg4ei32_v_i64m2x4(int64_t *base, vuint32m1_t bindex, vint64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf8x4(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg4ei32_v_u8mf8x4(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf4x4(uint8_t 
*base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg4ei32_v_u8mf4x4(uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf2x4(uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg4ei32_v_u8mf2x4(uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8m1x4(uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg4ei32_v_u8m1x4(uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8m2x4(uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg4ei32_v_u8m2x4(uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16mf4x4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void 
test_vsoxseg4ei32_v_u16mf4x4(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16mf2x4(uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg4ei32_v_u16mf2x4(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16m1x4(uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg4ei32_v_u16m1x4(uint16_t *base, vuint32m2_t bindex, vuint16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16m2x4(uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg4ei32_v_u16m2x4(uint16_t *base, vuint32m4_t bindex, vuint16m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32mf2x4(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg4ei32_v_u32mf2x4(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg4ei32_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32m1x4(uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg4ei32_v_u32m1x4(uint32_t *base, vuint32m1_t bindex, vuint32m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32m2x4(uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg4ei32_v_u32m2x4(uint32_t *base, vuint32m2_t bindex, vuint32m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u64m1x4(uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg4ei32_v_u64m1x4(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u64m2x4(uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg4ei32_v_u64m2x4(uint64_t *base, vuint32m1_t bindex, vuint64m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg4ei32_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg4ei32_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg4ei32_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void 
test_vsoxseg4ei32_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f32mf2x4_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg4ei32_v_f32mf2x4_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f32m1x4_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg4ei32_v_f32m1x4_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f32m2x4_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg4ei32_v_f32m2x4_m(vbool16_t mask, float *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f64m1x4_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg4ei32_v_f64m1x4_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f64m2x4_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg4ei32_v_f64m2x4_m(vbool32_t mask, double *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg4ei32_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg4ei32_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg4ei32_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg4ei32_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg4ei32_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint32m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg4ei32_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg4ei32_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg4ei32_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg4ei32_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg4ei32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg4ei32_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg4ei32_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg4ei32_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg4ei32_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i64m2x4_m(vbool32_t mask, int64_t *base, 
vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg4ei32_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg4ei32_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg4ei32_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg4ei32_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg4ei32_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg4ei32_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg4ei32_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg4ei32_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg4ei32_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg4ei32_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg4ei32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg4ei32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m2x4_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg4ei32_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg4ei32_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei64.c index 5a5944f4cd6a33..e91d73aac16c34 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 
0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16mf4x4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg4ei64_v_f16mf4x4(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16mf2x4(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg4ei64_v_f16mf2x4(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16m1x4(_Float16 *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg4ei64_v_f16m1x4(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16m2x4(_Float16 *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg4ei64_v_f16m2x4(_Float16 *base, vuint64m8_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32mf2x4(float *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg4ei64_v_f32mf2x4(float *base, vuint64m1_t bindex, vfloat32mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32m1x4(float *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg4ei64_v_f32m1x4(float *base, vuint64m2_t bindex, vfloat32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32m2x4(float *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg4ei64_v_f32m2x4(float *base, vuint64m4_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f64m1x4(double *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg4ei64_v_f64m1x4(double *base, vuint64m1_t bindex, vfloat64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f64m2x4(double *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg4ei64_v_f64m2x4(double *base, vuint64m2_t bindex, vfloat64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf8x4(int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg4ei64_v_i8mf8x4(int8_t *base, vuint64m1_t bindex, vint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf4x4(int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg4ei64_v_i8mf4x4(int8_t *base, vuint64m2_t bindex, vint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf2x4(int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg4ei64_v_i8mf2x4(int8_t *base, vuint64m4_t bindex, vint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8m1x4(int8_t *base, vuint64m8_t bindex, 
vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg4ei64_v_i8m1x4(int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16mf4x4(int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg4ei64_v_i16mf4x4(int16_t *base, vuint64m1_t bindex, vint16mf4x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16mf2x4(int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg4ei64_v_i16mf2x4(int16_t *base, vuint64m2_t bindex, vint16mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16m1x4(int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg4ei64_v_i16m1x4(int16_t *base, vuint64m4_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16m2x4(int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg4ei64_v_i16m2x4(int16_t *base, vuint64m8_t 
bindex, vint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32mf2x4(int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg4ei64_v_i32mf2x4(int32_t *base, vuint64m1_t bindex, vint32mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32m1x4(int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg4ei64_v_i32m1x4(int32_t *base, vuint64m2_t bindex, vint32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32m2x4(int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg4ei64_v_i32m2x4(int32_t *base, vuint64m4_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i64m1x4(int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg4ei64_v_i64m1x4(int64_t *base, vuint64m1_t bindex, vint64m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m2x4 // CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i64m2x4(int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg4ei64_v_i64m2x4(int64_t *base, vuint64m2_t bindex, vint64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf8x4(uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg4ei64_v_u8mf8x4(uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf4x4(uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg4ei64_v_u8mf4x4(uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf2x4(uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg4ei64_v_u8mf2x4(uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8m1x4(uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg4ei64_v_u8m1x4(uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16mf4x4(uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg4ei64_v_u16mf4x4(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16mf2x4(uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg4ei64_v_u16mf2x4(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16m1x4(uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg4ei64_v_u16m1x4(uint16_t *base, vuint64m4_t bindex, vuint16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16m2x4(uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg4ei64_v_u16m2x4(uint16_t *base, vuint64m8_t bindex, vuint16m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32mf2x4(uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg4ei64_v_u32mf2x4(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32m1x4(uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg4ei64_v_u32m1x4(uint32_t *base, vuint64m2_t bindex, vuint32m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32m2x4(uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg4ei64_v_u32m2x4(uint32_t *base, vuint64m4_t bindex, vuint32m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u64m1x4(uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg4ei64_v_u64m1x4(uint64_t *base, vuint64m1_t bindex, vuint64m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u64m2x4(uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg4ei64_v_u64m2x4(uint64_t *base, vuint64m2_t bindex, vuint64m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg4ei64_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg4ei64_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg4ei64_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg4ei64_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32mf2x4_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg4ei64_v_f32mf2x4_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32m1x4_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg4ei64_v_f32m1x4_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m2x4_m // CHECK-RV64-SAME: 
( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32m2x4_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg4ei64_v_f32m2x4_m(vbool16_t mask, float *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f64m1x4_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg4ei64_v_f64m1x4_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f64m2x4_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg4ei64_v_f64m2x4_m(vbool32_t mask, double *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t 
vl) { @@ -460,7 +460,7 @@ void test_vsoxseg4ei64_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg4ei64_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg4ei64_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg4ei64_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg4ei64_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg4ei64_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg4ei64_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg4ei64_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg4ei64_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg4ei64_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg4ei64_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg4ei64_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg4ei64_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg4ei64_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg4ei64_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg4ei64_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg4ei64_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg4ei64_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg4ei64_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg4ei64_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16m1x4_m(vbool16_t mask, 
uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg4ei64_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg4ei64_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg4ei64_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg4ei64_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg4ei64_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg4ei64_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei8.c index 02f8ec6f693f5c..d96f911957328d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16mf4x4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg4ei8_v_f16mf4x4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16mf2x4(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg4ei8_v_f16mf2x4(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16m1x4(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg4ei8_v_f16m1x4(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16m2x4(_Float16 *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg4ei8_v_f16m2x4(_Float16 *base, vuint8m1_t bindex, vfloat16m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32mf2x4(float *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg4ei8_v_f32mf2x4(float *base, vuint8mf8_t bindex, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32m1x4(float *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg4ei8_v_f32m1x4(float *base, vuint8mf4_t bindex, vfloat32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32m2x4(float *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg4ei8_v_f32m2x4(float *base, vuint8mf2_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f64m1x4(double *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg4ei8_v_f64m1x4(double *base, vuint8mf8_t bindex, vfloat64m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f64m2x4(double *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg4ei8_v_f64m2x4(double *base, vuint8mf4_t bindex, vfloat64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf8x4(int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg4ei8_v_i8mf8x4(int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf4x4(int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg4ei8_v_i8mf4x4(int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf2x4(int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg4ei8_v_i8mf2x4(int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8m1x4(int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg4ei8_v_i8m1x4(int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8m2x4(int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg4ei8_v_i8m2x4(int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16mf4x4(int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg4ei8_v_i16mf4x4(int16_t *base, vuint8mf8_t bindex, vint16mf4x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16mf2x4(int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg4ei8_v_i16mf2x4(int16_t *base, vuint8mf4_t bindex, vint16mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16m1x4(int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg4ei8_v_i16m1x4(int16_t *base, vuint8mf2_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16m2x4(int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg4ei8_v_i16m2x4(int16_t *base, vuint8m1_t bindex, vint16m2x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i32mf2x4(int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg4ei8_v_i32mf2x4(int32_t *base, vuint8mf8_t bindex, vint32mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i32m1x4(int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg4ei8_v_i32m1x4(int32_t *base, vuint8mf4_t bindex, vint32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i32m2x4(int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg4ei8_v_i32m2x4(int32_t *base, vuint8mf2_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i64m1x4(int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg4ei8_v_i64m1x4(int64_t *base, vuint8mf8_t bindex, vint64m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i64m2x4(int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg4ei8_v_i64m2x4(int64_t *base, vuint8mf4_t bindex, vint64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf8x4(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg4ei8_v_u8mf8x4(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf4x4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg4ei8_v_u8mf4x4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf2x4(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t 
v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg4ei8_v_u8mf2x4(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8m1x4(uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg4ei8_v_u8m1x4(uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8m2x4(uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg4ei8_v_u8m2x4(uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16mf4x4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg4ei8_v_u16mf4x4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16mf2x4(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg4ei8_v_u16mf2x4(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16m1x4(uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg4ei8_v_u16m1x4(uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16m2x4(uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg4ei8_v_u16m2x4(uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32mf2x4(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg4ei8_v_u32mf2x4(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32m1x4(uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg4ei8_v_u32m1x4(uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32m2x4(uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg4ei8_v_u32m2x4(uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u64m1x4(uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg4ei8_v_u64m1x4(uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u64m2x4(uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg4ei8_v_u64m2x4(uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg4ei8_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg4ei8_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg4ei8_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg4ei8_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32mf2x4_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void 
test_vsoxseg4ei8_v_f32mf2x4_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32m1x4_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg4ei8_v_f32m1x4_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32m2x4_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg4ei8_v_f32m2x4_m(vbool16_t mask, float *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f64m1x4_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg4ei8_v_f64m1x4_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], 
i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f64m2x4_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg4ei8_v_f64m2x4_m(vbool32_t mask, double *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg4ei8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg4ei8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg4ei8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg4ei8_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg4ei8_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg4ei8_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg4ei8_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg4ei8_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg4ei8_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg4ei8_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg4ei8_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg4ei8_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg4ei8_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg4ei8_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void 
test_vsoxseg4ei8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg4ei8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg4ei8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg4ei8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 
3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg4ei8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg4ei8_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg4ei8_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg4ei8_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg4ei8_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg4ei8_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg4ei8_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg4ei8_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg4ei8_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei16.c index 94153306d20183..de5db2869f0177 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f16mf4x5(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg5ei16_v_f16mf4x5(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f16mf2x5(_Float16 *base, vuint16mf2_t bindex, 
vfloat16mf2x5_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg5ei16_v_f16mf2x5(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f16m1x5(_Float16 *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg5ei16_v_f16m1x5(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f32mf2x5(float *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg5ei16_v_f32mf2x5(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f32m1x5(float *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg5ei16_v_f32m1x5(float *base, vuint16mf2_t bindex, vfloat32m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f64m1x5(double *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg5ei16_v_f64m1x5(double *base, vuint16mf4_t 
bindex, vfloat64m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf8x5(int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg5ei16_v_i8mf8x5(int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf4x5(int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg5ei16_v_i8mf4x5(int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf2x5(int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg5ei16_v_i8mf2x5(int8_t *base, vuint16m1_t bindex, vint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8m1x5(int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg5ei16_v_i8m1x5(int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16mf4x5(int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg5ei16_v_i16mf4x5(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16mf2x5(int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg5ei16_v_i16mf2x5(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16m1x5(int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg5ei16_v_i16m1x5(int16_t *base, vuint16m1_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i32mf2x5(int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg5ei16_v_i32mf2x5(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i32m1x5(int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg5ei16_v_i32m1x5(int32_t *base, vuint16mf2_t bindex, vint32m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i64m1x5(int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg5ei16_v_i64m1x5(int64_t *base, vuint16mf4_t bindex, vint64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf8x5(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg5ei16_v_u8mf8x5(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf4x5(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg5ei16_v_u8mf4x5(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf2x5(uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg5ei16_v_u8mf2x5(uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8m1x5(uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg5ei16_v_u8m1x5(uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u16mf4x5(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg5ei16_v_u16mf4x5(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u16mf2x5(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg5ei16_v_u16mf2x5(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u16m1x5(uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg5ei16_v_u16m1x5(uint16_t *base, vuint16m1_t bindex, vuint16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u32mf2x5(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg5ei16_v_u32mf2x5(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u32m1x5(uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg5ei16_v_u32m1x5(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u64m1x5(uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg5ei16_v_u64m1x5(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg5ei16_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg5ei16_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg5ei16_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f32mf2x5_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg5ei16_v_f32mf2x5_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f32m1x5_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg5ei16_v_f32m1x5_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f64m1x5_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg5ei16_v_f64m1x5_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg5ei16_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg5ei16_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf2x5_m 
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg5ei16_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg5ei16_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg5ei16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t 
v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg5ei16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg5ei16_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg5ei16_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg5ei16_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg5ei16_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg5ei16_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg5ei16_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg5ei16_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg5ei16_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg5ei16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg5ei16_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg5ei16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) 
[[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg5ei16_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg5ei16_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei32.c index a72fd4149e9f6a..73bb9febe6e63c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16mf4x5(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg5ei32_v_f16mf4x5(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16mf2x5(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg5ei32_v_f16mf2x5(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16m1x5(_Float16 *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg5ei32_v_f16m1x5(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f32mf2x5(float *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg5ei32_v_f32mf2x5(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f32m1x5(float *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg5ei32_v_f32m1x5(float *base, vuint32m1_t bindex, vfloat32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f64m1x5(double *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg5ei32_v_f64m1x5(double *base, vuint32mf2_t bindex, vfloat64m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf8x5(int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg5ei32_v_i8mf8x5(int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf4x5(int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg5ei32_v_i8mf4x5(int8_t *base, vuint32m1_t bindex, vint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf2x5(int8_t *base, 
vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg5ei32_v_i8mf2x5(int8_t *base, vuint32m2_t bindex, vint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8m1x5(int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg5ei32_v_i8m1x5(int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16mf4x5(int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg5ei32_v_i16mf4x5(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16mf2x5(int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg5ei32_v_i16mf2x5(int16_t *base, vuint32m1_t bindex, vint16mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16m1x5(int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg5ei32_v_i16m1x5(int16_t 
*base, vuint32m2_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i32mf2x5(int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg5ei32_v_i32mf2x5(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i32m1x5(int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg5ei32_v_i32m1x5(int32_t *base, vuint32m1_t bindex, vint32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i64m1x5(int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg5ei32_v_i64m1x5(int64_t *base, vuint32mf2_t bindex, vint64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf8x5(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg5ei32_v_u8mf8x5(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf4x5 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf4x5(uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg5ei32_v_u8mf4x5(uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf2x5(uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg5ei32_v_u8mf2x5(uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8m1x5(uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg5ei32_v_u8m1x5(uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16mf4x5(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg5ei32_v_u16mf4x5(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16mf2x5(uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg5ei32_v_u16mf2x5(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16m1x5(uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg5ei32_v_u16m1x5(uint16_t *base, vuint32m2_t bindex, vuint16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u32mf2x5(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg5ei32_v_u32mf2x5(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u32m1x5(uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg5ei32_v_u32m1x5(uint32_t *base, vuint32m1_t bindex, vuint32m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u64m1x5(uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg5ei32_v_u64m1x5(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg5ei32_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg5ei32_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg5ei32_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f32mf2x5_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg5ei32_v_f32mf2x5_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f32m1x5_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg5ei32_v_f32m1x5_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f64m1x5_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg5ei32_v_f64m1x5_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void 
test_vsoxseg5ei32_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg5ei32_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg5ei32_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg5ei32_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg5ei32_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg5ei32_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg5ei32_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg5ei32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg5ei32_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg5ei32_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg5ei32_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg5ei32_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg5ei32_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg5ei32_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg5ei32_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg5ei32_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg5ei32_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg5ei32_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg5ei32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg5ei32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u64m1x5_m(vbool64_t mask, uint64_t 
*base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei64.c
index 25d32d66dd76e6..1f7fe50ea6bb39 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei64.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf4x5
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_f16mf4x5(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) {
@@ -20,7 +20,7 @@ void test_vsoxseg5ei64_v_f16mf4x5(_Float16 *base, vuint64m1_t bindex, vfloat16mf
// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf2x5
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_f16mf2x5(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) {
@@ -30,7 +30,7 @@ void test_vsoxseg5ei64_v_f16mf2x5(_Float16 *base, vuint64m2_t bindex, vfloat16mf
// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16m1x5
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_f16m1x5(_Float16 *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) {
@@ -40,7 +40,7 @@ void test_vsoxseg5ei64_v_f16m1x5(_Float16 *base, vuint64m4_t bindex, vfloat16m1x
// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32mf2x5
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]],
i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f32mf2x5(float *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg5ei64_v_f32mf2x5(float *base, vuint64m1_t bindex, vfloat32mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f32m1x5(float *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg5ei64_v_f32m1x5(float *base, vuint64m2_t bindex, vfloat32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f64m1x5(double *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg5ei64_v_f64m1x5(double *base, vuint64m1_t bindex, vfloat64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf8x5(int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg5ei64_v_i8mf8x5(int8_t *base, vuint64m1_t bindex, vint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", 
, 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf4x5(int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg5ei64_v_i8mf4x5(int8_t *base, vuint64m2_t bindex, vint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf2x5(int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg5ei64_v_i8mf2x5(int8_t *base, vuint64m4_t bindex, vint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8m1x5(int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg5ei64_v_i8m1x5(int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i16mf4x5(int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg5ei64_v_i16mf4x5(int16_t *base, vuint64m1_t bindex, vint16mf4x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg5ei64_v_i16mf2x5(int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg5ei64_v_i16mf2x5(int16_t *base, vuint64m2_t bindex, vint16mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i16m1x5(int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg5ei64_v_i16m1x5(int16_t *base, vuint64m4_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i32mf2x5(int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg5ei64_v_i32mf2x5(int32_t *base, vuint64m1_t bindex, vint32mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i32m1x5(int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg5ei64_v_i32m1x5(int32_t *base, vuint64m2_t bindex, vint32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i64m1x5(int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ 
void test_vsoxseg5ei64_v_i64m1x5(int64_t *base, vuint64m1_t bindex, vint64m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf8x5(uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg5ei64_v_u8mf8x5(uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf4x5(uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg5ei64_v_u8mf4x5(uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf2x5(uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg5ei64_v_u8mf2x5(uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8m1x5(uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg5ei64_v_u8m1x5(uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t // CHECK-RV64-LABEL: define dso_local 
void @test_vsoxseg5ei64_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16mf4x5(uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg5ei64_v_u16mf4x5(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16mf2x5(uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg5ei64_v_u16mf2x5(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16m1x5(uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg5ei64_v_u16m1x5(uint16_t *base, vuint64m4_t bindex, vuint16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u32mf2x5(uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg5ei64_v_u32mf2x5(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u32m1x5(uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg5ei64_v_u32m1x5(uint32_t *base, vuint64m2_t bindex, vuint32m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u64m1x5(uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg5ei64_v_u64m1x5(uint64_t *base, vuint64m1_t bindex, vuint64m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg5ei64_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg5ei64_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16m1x5_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg5ei64_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f32mf2x5_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg5ei64_v_f32mf2x5_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f32m1x5_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg5ei64_v_f32m1x5_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f64m1x5_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x5_t 
v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg5ei64_v_f64m1x5_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg5ei64_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg5ei64_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg5ei64_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg5ei64_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg5ei64_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg5ei64_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg5ei64_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg5ei64_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg5ei64_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg5ei64_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg5ei64_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg5ei64_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg5ei64_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg5ei64_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg5ei64_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint64m1_t // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg5ei64_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg5ei64_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg5ei64_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg5ei64_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) {
@@ -520,7 +520,7 @@ void test_vsoxseg5ei64_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint64m2_t b
// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u64m1x5_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6)
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei64_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei8.c
index 53ff659495e37a..b53ac68c799e22 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei8.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf4x5
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_f16mf4x5(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) {
@@ -20,7 +20,7 @@ void test_vsoxseg5ei8_v_f16mf4x5(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4
// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf2x5
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg5ei8_v_f16mf2x5(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) {
@@ -30,7 +30,7 @@ void test_vsoxseg5ei8_v_f16mf2x5(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2
// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16m1x5
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f16m1x5(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg5ei8_v_f16m1x5(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f32mf2x5(float *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg5ei8_v_f32mf2x5(float *base, vuint8mf8_t bindex, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f32m1x5(float *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg5ei8_v_f32m1x5(float *base, vuint8mf4_t bindex, vfloat32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f64m1x5(double *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg5ei8_v_f64m1x5(double *base, vuint8mf8_t bindex, vfloat64m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8mf8x5(int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg5ei8_v_i8mf8x5(int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8mf4x5(int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg5ei8_v_i8mf4x5(int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8mf2x5(int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg5ei8_v_i8mf2x5(int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8m1x5(int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg5ei8_v_i8m1x5(int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16mf4x5(int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg5ei8_v_i16mf4x5(int16_t *base, vuint8mf8_t bindex, vint16mf4x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16mf2x5(int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg5ei8_v_i16mf2x5(int16_t *base, vuint8mf4_t bindex, vint16mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16m1x5(int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg5ei8_v_i16m1x5(int16_t *base, vuint8mf2_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i32mf2x5(int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg5ei8_v_i32mf2x5(int32_t *base, vuint8mf8_t bindex, vint32mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i32m1x5(int32_t 
*base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg5ei8_v_i32m1x5(int32_t *base, vuint8mf4_t bindex, vint32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i64m1x5(int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg5ei8_v_i64m1x5(int64_t *base, vuint8mf8_t bindex, vint64m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf8x5(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg5ei8_v_u8mf8x5(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf4x5(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg5ei8_v_u8mf4x5(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf2x5(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg5ei8_v_u8mf2x5(uint8_t *base, 
vuint8mf2_t bindex, vuint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8m1x5(uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg5ei8_v_u8m1x5(uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u16mf4x5(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg5ei8_v_u16mf4x5(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u16mf2x5(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg5ei8_v_u16mf2x5(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u16m1x5(uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg5ei8_v_u16m1x5(uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32mf2x5 // CHECK-RV64-SAME: 
(ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u32mf2x5(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg5ei8_v_u32mf2x5(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u32m1x5(uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg5ei8_v_u32m1x5(uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u64m1x5(uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg5ei8_v_u64m1x5(uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg5ei8_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg5ei8_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg5ei8_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f32mf2x5_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg5ei8_v_f32mf2x5_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f32m1x5_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void 
test_vsoxseg5ei8_v_f32m1x5_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f64m1x5_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg5ei8_v_f64m1x5_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg5ei8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg5ei8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) 
// CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg5ei8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg5ei8_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg5ei8_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg5ei8_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg5ei8_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg5ei8_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg5ei8_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg5ei8_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg5ei8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg5ei8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg5ei8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg5ei8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg5ei8_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg5ei8_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg5ei8_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ 
void test_vsoxseg5ei8_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg5ei8_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei16.c index cf05ffeb8d972f..ad18d970ec5f52 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16mf4x6(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg6ei16_v_f16mf4x6(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16mf2x6(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg6ei16_v_f16mf2x6(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16m1x6(_Float16 *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg6ei16_v_f16m1x6(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f32mf2x6(float *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg6ei16_v_f32mf2x6(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f32m1x6(float *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg6ei16_v_f32m1x6(float *base, vuint16mf2_t bindex, vfloat32m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f64m1x6(double *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg6ei16_v_f64m1x6(double *base, vuint16mf4_t bindex, vfloat64m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf8x6(int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg6ei16_v_i8mf8x6(int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf4x6(int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg6ei16_v_i8mf4x6(int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf2x6(int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg6ei16_v_i8mf2x6(int8_t *base, vuint16m1_t bindex, vint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8m1x6(int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg6ei16_v_i8m1x6(int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i16mf4x6(int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg6ei16_v_i16mf4x6(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i16mf2x6(int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg6ei16_v_i16mf2x6(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i16m1x6(int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg6ei16_v_i16m1x6(int16_t *base, vuint16m1_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i32mf2x6(int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg6ei16_v_i32mf2x6(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i32m1x6(int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg6ei16_v_i32m1x6(int32_t *base, vuint16mf2_t bindex, vint32m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i64m1x6(int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg6ei16_v_i64m1x6(int64_t *base, vuint16mf4_t bindex, vint64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf8x6(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg6ei16_v_u8mf8x6(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf4x6(uint8_t 
*base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg6ei16_v_u8mf4x6(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf2x6(uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg6ei16_v_u8mf2x6(uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8m1x6(uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg6ei16_v_u8m1x6(uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16mf4x6(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg6ei16_v_u16mf4x6(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16mf2x6(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void 
test_vsoxseg6ei16_v_u16mf2x6(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16m1x6(uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg6ei16_v_u16m1x6(uint16_t *base, vuint16m1_t bindex, vuint16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u32mf2x6(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg6ei16_v_u32mf2x6(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u32m1x6(uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg6ei16_v_u32m1x6(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u64m1x6(uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg6ei16_v_u64m1x6(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg6ei16_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg6ei16_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg6ei16_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg6ei16_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg6ei16_v_f32mf2x6_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg6ei16_v_f32mf2x6_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f32m1x6_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg6ei16_v_f32m1x6_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f64m1x6_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg6ei16_v_f64m1x6_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg6ei16_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg6ei16_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg6ei16_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg6ei16_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg6ei16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg6ei16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg6ei16_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg6ei16_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg6ei16_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i64m1x6_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg6ei16_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg6ei16_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg6ei16_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) 
{ @@ -460,7 +460,7 @@ void test_vsoxseg6ei16_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg6ei16_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg6ei16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg6ei16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg6ei16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg6ei16_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg6ei16_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei32.c index 7813c218f21e75..755cb31ac4d73c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg6ei32_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16mf4x6(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg6ei32_v_f16mf4x6(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16mf2x6(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg6ei32_v_f16mf2x6(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16m1x6(_Float16 *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg6ei32_v_f16m1x6(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f32mf2x6(float *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg6ei32_v_f32mf2x6(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f32m1x6(float *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg6ei32_v_f32m1x6(float *base, vuint32m1_t bindex, vfloat32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f64m1x6(double *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg6ei32_v_f64m1x6(double *base, vuint32mf2_t bindex, vfloat64m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf8x6(int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg6ei32_v_i8mf8x6(int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf4x6(int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg6ei32_v_i8mf4x6(int8_t *base, vuint32m1_t bindex, vint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf2x6(int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg6ei32_v_i8mf2x6(int8_t *base, vuint32m2_t bindex, vint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8m1x6(int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg6ei32_v_i8m1x6(int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i16mf4x6(int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg6ei32_v_i16mf4x6(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i16mf2x6(int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg6ei32_v_i16mf2x6(int16_t *base, vuint32m1_t bindex, vint16mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 
6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i16m1x6(int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg6ei32_v_i16m1x6(int16_t *base, vuint32m2_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i32mf2x6(int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg6ei32_v_i32mf2x6(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i32m1x6(int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg6ei32_v_i32m1x6(int32_t *base, vuint32m1_t bindex, vint32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i64m1x6(int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg6ei32_v_i64m1x6(int64_t *base, vuint32mf2_t bindex, vint64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf8x6(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg6ei32_v_u8mf8x6(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf4x6(uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg6ei32_v_u8mf4x6(uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf2x6(uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg6ei32_v_u8mf2x6(uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8m1x6(uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg6ei32_v_u8m1x6(uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16mf4x6(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg6ei32_v_u16mf4x6(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16mf2x6(uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg6ei32_v_u16mf2x6(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16m1x6(uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg6ei32_v_u16m1x6(uint16_t *base, vuint32m2_t bindex, vuint16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u32mf2x6(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg6ei32_v_u32mf2x6(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg6ei32_v_u32m1x6(uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg6ei32_v_u32m1x6(uint32_t *base, vuint32m1_t bindex, vuint32m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u64m1x6(uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg6ei32_v_u64m1x6(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg6ei32_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg6ei32_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg6ei32_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f32mf2x6_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg6ei32_v_f32mf2x6_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f32m1x6_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg6ei32_v_f32m1x6_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f64m1x6_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg6ei32_v_f64m1x6_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg6ei32_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg6ei32_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg6ei32_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg6ei32_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg6ei32_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg6ei32_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg6ei32_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ 
-410,7 +410,7 @@ void test_vsoxseg6ei32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg6ei32_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg6ei32_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg6ei32_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg6ei32_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg6ei32_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg6ei32_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg6ei32_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg6ei32_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg6ei32_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg6ei32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg6ei32_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei64.c index 1d6d5816f304f1..4df41ab873358f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16mf4x6(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg6ei64_v_f16mf4x6(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16mf2x6(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg6ei64_v_f16mf2x6(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16m1x6(_Float16 *base, vuint64m4_t bindex, vfloat16m1x6_t 
v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg6ei64_v_f16m1x6(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f32mf2x6(float *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg6ei64_v_f32mf2x6(float *base, vuint64m1_t bindex, vfloat32mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f32m1x6(float *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg6ei64_v_f32m1x6(float *base, vuint64m2_t bindex, vfloat32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f64m1x6(double *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg6ei64_v_f64m1x6(double *base, vuint64m1_t bindex, vfloat64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf8x6(int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg6ei64_v_i8mf8x6(int8_t *base, vuint64m1_t bindex, vint8mf8x6_t // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf4x6(int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg6ei64_v_i8mf4x6(int8_t *base, vuint64m2_t bindex, vint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf2x6(int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg6ei64_v_i8mf2x6(int8_t *base, vuint64m4_t bindex, vint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8m1x6(int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg6ei64_v_i8m1x6(int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16mf4x6(int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg6ei64_v_i16mf4x6(int16_t *base, vuint64m1_t bindex, vint16mf4x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16mf2x6(int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg6ei64_v_i16mf2x6(int16_t *base, vuint64m2_t bindex, vint16mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16m1x6(int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg6ei64_v_i16m1x6(int16_t *base, vuint64m4_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i32mf2x6(int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg6ei64_v_i32mf2x6(int32_t *base, vuint64m1_t bindex, vint32mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i32m1x6(int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg6ei64_v_i32m1x6(int32_t *base, vuint64m2_t bindex, vint32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i64m1x6(int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg6ei64_v_i64m1x6(int64_t *base, vuint64m1_t bindex, vint64m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8mf8x6(uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg6ei64_v_u8mf8x6(uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8mf4x6(uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg6ei64_v_u8mf4x6(uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8mf2x6(uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg6ei64_v_u8mf2x6(uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8m1x6(uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg6ei64_v_u8m1x6(uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16mf4x6(uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg6ei64_v_u16mf4x6(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16mf2x6(uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg6ei64_v_u16mf2x6(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16m1x6(uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg6ei64_v_u16m1x6(uint16_t *base, vuint64m4_t bindex, vuint16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u32mf2x6(uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg6ei64_v_u32mf2x6(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u32m1x6(uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg6ei64_v_u32m1x6(uint32_t *base, vuint64m2_t bindex, vuint32m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u64m1x6(uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg6ei64_v_u64m1x6(uint64_t *base, vuint64m1_t bindex, vuint64m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg6ei64_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg6ei64_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg6ei64_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f32mf2x6_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg6ei64_v_f32mf2x6_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f32m1x6_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg6ei64_v_f32m1x6_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f64m1x6_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg6ei64_v_f64m1x6_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg6ei64_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg6ei64_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg6ei64_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: 
define dso_local void @test_vsoxseg6ei64_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg6ei64_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg6ei64_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg6ei64_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16m1x6_m(vbool16_t 
mask, int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg6ei64_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg6ei64_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg6ei64_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg6ei64_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg6ei64_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg6ei64_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg6ei64_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg6ei64_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg6ei64_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg6ei64_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg6ei64_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg6ei64_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32m1x6_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5)
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxseg6ei64_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) {
@@ -520,7 +520,7 @@ void test_vsoxseg6ei64_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint64m2_t b
 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u64m1x6_m
 // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6)
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxseg6ei64_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei8.c
index 05ca77020c1dab..3f197a28951b04 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei8.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf4x6
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxseg6ei8_v_f16mf4x6(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) {
@@ -20,7 +20,7 @@ void test_vsoxseg6ei8_v_f16mf4x6(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4
 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf2x6
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f16mf2x6(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg6ei8_v_f16mf2x6(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f16m1x6(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg6ei8_v_f16m1x6(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f32mf2x6(float *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg6ei8_v_f32mf2x6(float *base, vuint8mf8_t bindex, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f32m1x6(float *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg6ei8_v_f32m1x6(float *base, vuint8mf4_t bindex, vfloat32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f64m1x6(double *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg6ei8_v_f64m1x6(double *base, vuint8mf8_t bindex, vfloat64m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf8x6(int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg6ei8_v_i8mf8x6(int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf4x6(int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg6ei8_v_i8mf4x6(int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf2x6(int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg6ei8_v_i8mf2x6(int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8m1x6(int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) 
{ @@ -110,7 +110,7 @@ void test_vsoxseg6ei8_v_i8m1x6(int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i16mf4x6(int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg6ei8_v_i16mf4x6(int16_t *base, vuint8mf8_t bindex, vint16mf4x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i16mf2x6(int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg6ei8_v_i16mf2x6(int16_t *base, vuint8mf4_t bindex, vint16mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i16m1x6(int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg6ei8_v_i16m1x6(int16_t *base, vuint8mf2_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i32mf2x6(int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg6ei8_v_i32mf2x6(int32_t *base, vuint8mf8_t bindex, vint32mf2x6_ // CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg6ei8_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i32m1x6(int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg6ei8_v_i32m1x6(int32_t *base, vuint8mf4_t bindex, vint32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i64m1x6(int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg6ei8_v_i64m1x6(int64_t *base, vuint8mf8_t bindex, vint64m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf8x6(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg6ei8_v_u8mf8x6(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf4x6(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg6ei8_v_u8mf4x6(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 
6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf2x6(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg6ei8_v_u8mf2x6(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8m1x6(uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg6ei8_v_u8m1x6(uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u16mf4x6(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg6ei8_v_u16mf4x6(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u16mf2x6(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg6ei8_v_u16mf2x6(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u16m1x6(uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg6ei8_v_u16m1x6(uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u32mf2x6(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg6ei8_v_u32mf2x6(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u32m1x6(uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg6ei8_v_u32m1x6(uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u64m1x6(uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg6ei8_v_u64m1x6(uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg6ei8_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg6ei8_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg6ei8_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f32mf2x6_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg6ei8_v_f32mf2x6_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f32m1x6_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg6ei8_v_f32m1x6_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f64m1x6_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg6ei8_v_f64m1x6_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg6ei8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg6ei8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg6ei8_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg6ei8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg6ei8_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg6ei8_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i16mf2x6_m(vbool32_t mask, int16_t *base, 
vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg6ei8_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg6ei8_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg6ei8_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg6ei8_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg6ei8_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg6ei8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg6ei8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg6ei8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg6ei8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg6ei8_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg6ei8_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg6ei8_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg6ei8_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg6ei8_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei16.c index 4d151f951ff8f5..aa14469fa2ad96 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16mf4x7(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg7ei16_v_f16mf4x7(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16mf2x7(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg7ei16_v_f16mf2x7(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16m1x7(_Float16 *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg7ei16_v_f16m1x7(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f32mf2x7(float *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg7ei16_v_f32mf2x7(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f32m1x7(float *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg7ei16_v_f32m1x7(float *base, vuint16mf2_t bindex, vfloat32m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f64m1x7(double *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg7ei16_v_f64m1x7(double *base, vuint16mf4_t bindex, vfloat64m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf8x7(int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg7ei16_v_i8mf8x7(int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf4x7(int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg7ei16_v_i8mf4x7(int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf2x7(int8_t *base, 
vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg7ei16_v_i8mf2x7(int8_t *base, vuint16m1_t bindex, vint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8m1x7(int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg7ei16_v_i8m1x7(int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i16mf4x7(int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg7ei16_v_i16mf4x7(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i16mf2x7(int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg7ei16_v_i16mf2x7(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i16m1x7(int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg7ei16_v_i16m1x7(int16_t 
*base, vuint16m1_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i32mf2x7(int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg7ei16_v_i32mf2x7(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i32m1x7(int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg7ei16_v_i32m1x7(int32_t *base, vuint16mf2_t bindex, vint32m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i64m1x7(int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg7ei16_v_i64m1x7(int64_t *base, vuint16mf4_t bindex, vint64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf8x7(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg7ei16_v_u8mf8x7(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf4x7 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf4x7(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg7ei16_v_u8mf4x7(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf2x7(uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg7ei16_v_u8mf2x7(uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8m1x7(uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg7ei16_v_u8m1x7(uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16mf4x7(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg7ei16_v_u16mf4x7(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16mf2x7(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg7ei16_v_u16mf2x7(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16m1x7(uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg7ei16_v_u16m1x7(uint16_t *base, vuint16m1_t bindex, vuint16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u32mf2x7(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg7ei16_v_u32mf2x7(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u32m1x7(uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg7ei16_v_u32m1x7(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u64m1x7(uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg7ei16_v_u64m1x7(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg7ei16_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg7ei16_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg7ei16_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f32mf2x7_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg7ei16_v_f32mf2x7_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f32m1x7_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg7ei16_v_f32m1x7_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f64m1x7_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg7ei16_v_f64m1x7_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void 
test_vsoxseg7ei16_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg7ei16_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg7ei16_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg7ei16_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg7ei16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg7ei16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg7ei16_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg7ei16_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg7ei16_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg7ei16_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg7ei16_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg7ei16_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg7ei16_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg7ei16_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg7ei16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg7ei16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg7ei16_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg7ei16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg7ei16_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg7ei16_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u64m1x7_m(vbool64_t mask, uint64_t 
*base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei32.c index 84cbda116810f9..47fdb5193d70b2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16mf4x7(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg7ei32_v_f16mf4x7(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16mf2x7(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg7ei32_v_f16mf2x7(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16m1x7(_Float16 *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg7ei32_v_f16m1x7(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], 
i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f32mf2x7(float *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg7ei32_v_f32mf2x7(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f32m1x7(float *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg7ei32_v_f32m1x7(float *base, vuint32m1_t bindex, vfloat32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f64m1x7(double *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg7ei32_v_f64m1x7(double *base, vuint32mf2_t bindex, vfloat64m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf8x7(int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg7ei32_v_i8mf8x7(int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf4x7(int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg7ei32_v_i8mf4x7(int8_t *base, vuint32m1_t bindex, vint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf2x7(int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg7ei32_v_i8mf2x7(int8_t *base, vuint32m2_t bindex, vint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8m1x7(int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg7ei32_v_i8m1x7(int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16mf4x7(int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg7ei32_v_i16mf4x7(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16mf2x7(int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg7ei32_v_i16mf2x7(int16_t *base, vuint32m1_t bindex, vint16mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16m1x7(int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg7ei32_v_i16m1x7(int16_t *base, vuint32m2_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i32mf2x7(int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg7ei32_v_i32mf2x7(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i32m1x7(int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg7ei32_v_i32m1x7(int32_t *base, vuint32m1_t bindex, vint32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i64m1x7(int64_t *base, 
vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg7ei32_v_i64m1x7(int64_t *base, vuint32mf2_t bindex, vint64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf8x7(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg7ei32_v_u8mf8x7(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf4x7(uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg7ei32_v_u8mf4x7(uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf2x7(uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg7ei32_v_u8mf2x7(uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8m1x7(uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg7ei32_v_u8m1x7(uint8_t 
*base, vuint32m4_t bindex, vuint8m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16mf4x7(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg7ei32_v_u16mf4x7(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16mf2x7(uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg7ei32_v_u16mf2x7(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16m1x7(uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg7ei32_v_u16m1x7(uint16_t *base, vuint32m2_t bindex, vuint16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u32mf2x7(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg7ei32_v_u32mf2x7(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg7ei32_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u32m1x7(uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg7ei32_v_u32m1x7(uint32_t *base, vuint32m1_t bindex, vuint32m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u64m1x7(uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg7ei32_v_u64m1x7(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg7ei32_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg7ei32_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, 
vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg7ei32_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f32mf2x7_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg7ei32_v_f32mf2x7_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f32m1x7_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg7ei32_v_f32m1x7_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg7ei32_v_f64m1x7_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg7ei32_v_f64m1x7_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg7ei32_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg7ei32_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg7ei32_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg7ei32_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg7ei32_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg7ei32_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg7ei32_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg7ei32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg7ei32_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg7ei32_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg7ei32_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf4x7_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg7ei32_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg7ei32_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg7ei32_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) 
{ @@ -480,7 +480,7 @@ void test_vsoxseg7ei32_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg7ei32_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg7ei32_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg7ei32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg7ei32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei64.c index 086a0c39389335..21f7f8e198852c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16mf4x7(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg7ei64_v_f16mf4x7(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16mf2x7(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg7ei64_v_f16mf2x7(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16m1x7(_Float16 *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg7ei64_v_f16m1x7(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f32mf2x7(float *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg7ei64_v_f32mf2x7(float *base, vuint64m1_t bindex, vfloat32mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f32m1x7(float *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg7ei64_v_f32m1x7(float *base, vuint64m2_t bindex, vfloat32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f64m1x7(double *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg7ei64_v_f64m1x7(double *base, vuint64m1_t bindex, vfloat64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8mf8x7(int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg7ei64_v_i8mf8x7(int8_t *base, vuint64m1_t bindex, vint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8mf4x7(int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg7ei64_v_i8mf4x7(int8_t *base, vuint64m2_t bindex, vint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8mf2x7(int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg7ei64_v_i8mf2x7(int8_t *base, vuint64m4_t bindex, vint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8m1x7(int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg7ei64_v_i8m1x7(int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16mf4x7(int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg7ei64_v_i16mf4x7(int16_t *base, vuint64m1_t bindex, vint16mf4x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16mf2x7(int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg7ei64_v_i16mf2x7(int16_t *base, vuint64m2_t bindex, vint16mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16m1x7(int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg7ei64_v_i16m1x7(int16_t *base, vuint64m4_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i32mf2x7(int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg7ei64_v_i32mf2x7(int32_t *base, vuint64m1_t bindex, vint32mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i32m1x7(int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg7ei64_v_i32m1x7(int32_t *base, vuint64m2_t bindex, vint32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i64m1x7(int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg7ei64_v_i64m1x7(int64_t *base, vuint64m1_t bindex, vint64m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8mf8x7(uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg7ei64_v_u8mf8x7(uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8mf4x7(uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg7ei64_v_u8mf4x7(uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8mf2x7(uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg7ei64_v_u8mf2x7(uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8m1x7(uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg7ei64_v_u8m1x7(uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u16mf4x7(uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg7ei64_v_u16mf4x7(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u16mf2x7(uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg7ei64_v_u16mf2x7(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u16m1x7(uint16_t 
*base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg7ei64_v_u16m1x7(uint16_t *base, vuint64m4_t bindex, vuint16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u32mf2x7(uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg7ei64_v_u32mf2x7(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u32m1x7(uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg7ei64_v_u32m1x7(uint32_t *base, vuint64m2_t bindex, vuint32m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u64m1x7(uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg7ei64_v_u64m1x7(uint64_t *base, vuint64m1_t bindex, vuint64m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, 
vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg7ei64_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg7ei64_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg7ei64_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f32mf2x7_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg7ei64_v_f32mf2x7_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f32m1x7_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg7ei64_v_f32m1x7_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f64m1x7_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg7ei64_v_f64m1x7_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg7ei64_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg7ei64_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg7ei64_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg7ei64_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg7ei64_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg7ei64_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16m1x7_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg7ei64_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg7ei64_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg7ei64_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) { 
@@ -430,7 +430,7 @@ void test_vsoxseg7ei64_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg7ei64_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg7ei64_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg7ei64_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg7ei64_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg7ei64_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg7ei64_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg7ei64_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg7ei64_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg7ei64_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei8.c index 35678e75943326..f5871641e5cf67 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f16mf4x7(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void 
test_vsoxseg7ei8_v_f16mf4x7(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f16mf2x7(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg7ei8_v_f16mf2x7(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f16m1x7(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg7ei8_v_f16m1x7(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f32mf2x7(float *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg7ei8_v_f32mf2x7(float *base, vuint8mf8_t bindex, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f32m1x7(float *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg7ei8_v_f32m1x7(float *base, vuint8mf4_t bindex, vfloat32m1x7_t // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg7ei8_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f64m1x7(double *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg7ei8_v_f64m1x7(double *base, vuint8mf8_t bindex, vfloat64m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8mf8x7(int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg7ei8_v_i8mf8x7(int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8mf4x7(int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg7ei8_v_i8mf4x7(int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8mf2x7(int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg7ei8_v_i8mf2x7(int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8m1x7(int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg7ei8_v_i8m1x7(int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i16mf4x7(int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg7ei8_v_i16mf4x7(int16_t *base, vuint8mf8_t bindex, vint16mf4x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i16mf2x7(int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg7ei8_v_i16mf2x7(int16_t *base, vuint8mf4_t bindex, vint16mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i16m1x7(int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg7ei8_v_i16m1x7(int16_t *base, vuint8mf2_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i32mf2x7(int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg7ei8_v_i32mf2x7(int32_t *base, vuint8mf8_t bindex, vint32mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i32m1x7(int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg7ei8_v_i32m1x7(int32_t *base, vuint8mf4_t bindex, vint32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i64m1x7(int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg7ei8_v_i64m1x7(int64_t *base, vuint8mf8_t bindex, vint64m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf8x7(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg7ei8_v_u8mf8x7(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf4x7(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg7ei8_v_u8mf4x7(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf2x7(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg7ei8_v_u8mf2x7(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8m1x7(uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg7ei8_v_u8m1x7(uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16mf4x7(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg7ei8_v_u16mf4x7(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16mf2x7(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg7ei8_v_u16mf2x7(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16m1x7(uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg7ei8_v_u16m1x7(uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u32mf2x7(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg7ei8_v_u32mf2x7(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u32m1x7(uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg7ei8_v_u32m1x7(uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u64m1x7(uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg7ei8_v_u64m1x7(uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg7ei8_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg7ei8_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg7ei8_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 
0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f32mf2x7_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg7ei8_v_f32mf2x7_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f32m1x7_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg7ei8_v_f32m1x7_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f64m1x7_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg7ei8_v_f64m1x7_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg7ei8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg7ei8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg7ei8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg7ei8_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg7ei8_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg7ei8_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg7ei8_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg7ei8_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void 
test_vsoxseg7ei8_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg7ei8_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg7ei8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg7ei8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 
3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg7ei8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg7ei8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg7ei8_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg7ei8_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg7ei8_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg7ei8_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg7ei8_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei16.c index 6551b4b86d96e8..656ee8e92071ab 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei16.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16mf4x8(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg8ei16_v_f16mf4x8(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16mf2x8(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg8ei16_v_f16mf2x8(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16m1x8(_Float16 *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg8ei16_v_f16m1x8(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f32mf2x8(float *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg8ei16_v_f32mf2x8(float *base, vuint16mf4_t bindex, 
vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f32m1x8(float *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg8ei16_v_f32m1x8(float *base, vuint16mf2_t bindex, vfloat32m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f64m1x8(double *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg8ei16_v_f64m1x8(double *base, vuint16mf4_t bindex, vfloat64m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf8x8(int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg8ei16_v_i8mf8x8(int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf4x8(int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg8ei16_v_i8mf4x8(int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf2x8(int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg8ei16_v_i8mf2x8(int8_t *base, vuint16m1_t bindex, vint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8m1x8(int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg8ei16_v_i8m1x8(int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16mf4x8(int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg8ei16_v_i16mf4x8(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16mf2x8(int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg8ei16_v_i16mf2x8(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16m1x8(int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg8ei16_v_i16m1x8(int16_t *base, vuint16m1_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i32mf2x8(int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg8ei16_v_i32mf2x8(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i32m1x8(int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg8ei16_v_i32m1x8(int32_t *base, vuint16mf2_t bindex, vint32m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i64m1x8(int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg8ei16_v_i64m1x8(int64_t *base, vuint16mf4_t bindex, vint64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf8x8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg8ei16_v_u8mf8x8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf4x8(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg8ei16_v_u8mf4x8(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf2x8(uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg8ei16_v_u8mf2x8(uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8m1x8(uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg8ei16_v_u8m1x8(uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16mf4x8(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg8ei16_v_u16mf4x8(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16mf2x8(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg8ei16_v_u16mf2x8(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16m1x8(uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg8ei16_v_u16m1x8(uint16_t *base, vuint16m1_t bindex, vuint16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u32mf2x8(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg8ei16_v_u32mf2x8(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u32m1x8(uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg8ei16_v_u32m1x8(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u64m1x8(uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg8ei16_v_u64m1x8(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg8ei16_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg8ei16_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg8ei16_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f32mf2x8_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg8ei16_v_f32mf2x8_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f32m1x8_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg8ei16_v_f32m1x8_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f64m1x8_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg8ei16_v_f64m1x8_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg8ei16_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg8ei16_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg8ei16_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg8ei16_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg8ei16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg8ei16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg8ei16_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg8ei16_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg8ei16_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg8ei16_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg8ei16_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg8ei16_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg8ei16_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg8ei16_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg8ei16_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg8ei16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg8ei16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg8ei16_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg8ei16_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg8ei16_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u64m1x8_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei32.c index 13550fbcc02780..52adfb62ceea12 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f16mf4x8(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg8ei32_v_f16mf4x8(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f16mf2x8(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg8ei32_v_f16mf2x8(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f16m1x8(_Float16 *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg8ei32_v_f16m1x8(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f32mf2x8(float *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg8ei32_v_f32mf2x8(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f32m1x8(float *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg8ei32_v_f32m1x8(float *base, vuint32m1_t bindex, vfloat32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f64m1x8(double *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg8ei32_v_f64m1x8(double *base, vuint32mf2_t bindex, vfloat64m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8mf8x8(int8_t *base, 
vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg8ei32_v_i8mf8x8(int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8mf4x8(int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg8ei32_v_i8mf4x8(int8_t *base, vuint32m1_t bindex, vint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8mf2x8(int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg8ei32_v_i8mf2x8(int8_t *base, vuint32m2_t bindex, vint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8m1x8(int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg8ei32_v_i8m1x8(int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16mf4x8(int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg8ei32_v_i16mf4x8(int16_t *base, 
vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16mf2x8(int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg8ei32_v_i16mf2x8(int16_t *base, vuint32m1_t bindex, vint16mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16m1x8(int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg8ei32_v_i16m1x8(int16_t *base, vuint32m2_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i32mf2x8(int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg8ei32_v_i32mf2x8(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i32m1x8(int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg8ei32_v_i32m1x8(int32_t *base, vuint32m1_t bindex, vint32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i64m1x8 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i64m1x8(int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg8ei32_v_i64m1x8(int64_t *base, vuint32mf2_t bindex, vint64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf8x8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg8ei32_v_u8mf8x8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf4x8(uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg8ei32_v_u8mf4x8(uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf2x8(uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg8ei32_v_u8mf2x8(uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8m1x8(uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg8ei32_v_u8m1x8(uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16mf4x8(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg8ei32_v_u16mf4x8(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16mf2x8(uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg8ei32_v_u16mf2x8(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16m1x8(uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg8ei32_v_u16m1x8(uint16_t *base, vuint32m2_t bindex, vuint16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u32mf2x8(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg8ei32_v_u32mf2x8(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u32m1x8(uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg8ei32_v_u32m1x8(uint32_t *base, vuint32m1_t bindex, vuint32m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u64m1x8(uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg8ei32_v_u64m1x8(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg8ei32_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg8ei32_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg8ei32_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f32mf2x8_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg8ei32_v_f32mf2x8_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f32m1x8_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg8ei32_v_f32m1x8_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f64m1x8_m // CHECK-RV64-SAME: 
( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f64m1x8_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg8ei32_v_f64m1x8_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg8ei32_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg8ei32_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { 
@@ -360,7 +360,7 @@ void test_vsoxseg8ei32_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg8ei32_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg8ei32_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg8ei32_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg8ei32_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg8ei32_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg8ei32_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg8ei32_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg8ei32_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg8ei32_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg8ei32_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg8ei32_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg8ei32_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg8ei32_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg8ei32_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg8ei32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg8ei32_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg8ei32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei64.c index bf8884f748142f..6d0a9f4c7e5b3b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16mf4x8(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg8ei64_v_f16mf4x8(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16mf2x8(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg8ei64_v_f16mf2x8(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16m1x8(_Float16 *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg8ei64_v_f16m1x8(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f32mf2x8(float *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg8ei64_v_f32mf2x8(float *base, vuint64m1_t bindex, vfloat32mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f32m1x8(float *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg8ei64_v_f32m1x8(float *base, vuint64m2_t bindex, vfloat32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f64m1x8(double *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg8ei64_v_f64m1x8(double *base, vuint64m1_t bindex, vfloat64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf8x8(int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg8ei64_v_i8mf8x8(int8_t *base, vuint64m1_t bindex, vint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf4x8(int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg8ei64_v_i8mf4x8(int8_t *base, vuint64m2_t bindex, vint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf2x8(int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg8ei64_v_i8mf2x8(int8_t *base, vuint64m4_t bindex, vint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8m1x8(int8_t *base, vuint64m8_t 
bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg8ei64_v_i8m1x8(int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16mf4x8(int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg8ei64_v_i16mf4x8(int16_t *base, vuint64m1_t bindex, vint16mf4x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16mf2x8(int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg8ei64_v_i16mf2x8(int16_t *base, vuint64m2_t bindex, vint16mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16m1x8(int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg8ei64_v_i16m1x8(int16_t *base, vuint64m4_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i32mf2x8(int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg8ei64_v_i32mf2x8(int32_t *base, 
vuint64m1_t bindex, vint32mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i32m1x8(int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg8ei64_v_i32m1x8(int32_t *base, vuint64m2_t bindex, vint32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i64m1x8(int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg8ei64_v_i64m1x8(int64_t *base, vuint64m1_t bindex, vint64m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf8x8(uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg8ei64_v_u8mf8x8(uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf4x8(uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg8ei64_v_u8mf4x8(uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf2x8 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf2x8(uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg8ei64_v_u8mf2x8(uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8m1x8(uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg8ei64_v_u8m1x8(uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16mf4x8(uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg8ei64_v_u16mf4x8(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16mf2x8(uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg8ei64_v_u16mf2x8(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16m1x8(uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg8ei64_v_u16m1x8(uint16_t *base, vuint64m4_t bindex, vuint16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u32mf2x8(uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg8ei64_v_u32mf2x8(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u32m1x8(uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg8ei64_v_u32m1x8(uint32_t *base, vuint64m2_t bindex, vuint32m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u64m1x8(uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg8ei64_v_u64m1x8(uint64_t *base, vuint64m1_t bindex, vuint64m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg8ei64_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg8ei64_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg8ei64_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f32mf2x8_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg8ei64_v_f32mf2x8_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32m1x8_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f32m1x8_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg8ei64_v_f32m1x8_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f64m1x8_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg8ei64_v_f64m1x8_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg8ei64_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x8_t 
v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg8ei64_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg8ei64_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg8ei64_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg8ei64_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg8ei64_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg8ei64_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg8ei64_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg8ei64_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg8ei64_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg8ei64_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg8ei64_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg8ei64_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg8ei64_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg8ei64_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg8ei64_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg8ei64_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg8ei64_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg8ei64_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei8.c index 03959b29783a3b..b37b79ce73bde6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f16mf4x8(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg8ei8_v_f16mf4x8(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f16mf2x8(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg8ei8_v_f16mf2x8(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f16m1x8(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg8ei8_v_f16m1x8(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f32mf2x8(float *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg8ei8_v_f32mf2x8(float *base, vuint8mf8_t bindex, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f32m1x8(float *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg8ei8_v_f32m1x8(float *base, vuint8mf4_t bindex, vfloat32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f64m1x8(double *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg8ei8_v_f64m1x8(double *base, vuint8mf8_t bindex, vfloat64m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8mf8x8(int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg8ei8_v_i8mf8x8(int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8mf4x8(int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg8ei8_v_i8mf4x8(int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8mf2x8(int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg8ei8_v_i8mf2x8(int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8m1x8(int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg8ei8_v_i8m1x8(int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16mf4x8(int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg8ei8_v_i16mf4x8(int16_t *base, vuint8mf8_t bindex, vint16mf4x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16mf2x8(int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg8ei8_v_i16mf2x8(int16_t *base, vuint8mf4_t bindex, vint16mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16m1x8(int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg8ei8_v_i16m1x8(int16_t *base, vuint8mf2_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i32mf2x8(int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg8ei8_v_i32mf2x8(int32_t *base, vuint8mf8_t bindex, vint32mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i32m1x8(int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg8ei8_v_i32m1x8(int32_t *base, vuint8mf4_t bindex, vint32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i64m1x8(int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg8ei8_v_i64m1x8(int64_t *base, vuint8mf8_t bindex, vint64m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf8x8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, 
size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg8ei8_v_u8mf8x8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf4x8(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg8ei8_v_u8mf4x8(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf2x8(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg8ei8_v_u8mf2x8(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8m1x8(uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg8ei8_v_u8m1x8(uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u16mf4x8(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg8ei8_v_u16mf4x8(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: 
define dso_local void @test_vsoxseg8ei8_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u16mf2x8(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg8ei8_v_u16mf2x8(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u16m1x8(uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg8ei8_v_u16m1x8(uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u32mf2x8(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg8ei8_v_u32mf2x8(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u32m1x8(uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg8ei8_v_u32m1x8(uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u64m1x8(uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg8ei8_v_u64m1x8(uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg8ei8_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg8ei8_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg8ei8_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f32mf2x8_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg8ei8_v_f32mf2x8_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f32m1x8_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg8ei8_v_f32m1x8_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f64m1x8_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg8ei8_v_f64m1x8_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg8ei8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg8ei8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg8ei8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg8ei8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg8ei8_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg8ei8_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg8ei8_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg8ei8_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg8ei8_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg8ei8_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg8ei8_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg8ei8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg8ei8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg8ei8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg8ei8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg8ei8_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void 
test_vsoxseg8ei8_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg8ei8_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg8ei8_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg8ei8_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei16.c index f0c987426532f1..0bf332920c77e9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16mf4x2(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg2ei16_v_f16mf4x2(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16mf2x2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg2ei16_v_f16mf2x2(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m1x2(_Float16 *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg2ei16_v_f16m1x2(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m2x2(_Float16 *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg2ei16_v_f16m2x2(_Float16 *base, vuint16m2_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m4x2(_Float16 *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg2ei16_v_f16m4x2(_Float16 *base, vuint16m4_t bindex, vfloat16m4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32mf2x2(float *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg2ei16_v_f32mf2x2(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m1x2(float *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg2ei16_v_f32m1x2(float *base, vuint16mf2_t bindex, vfloat32m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m2x2(float *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg2ei16_v_f32m2x2(float *base, vuint16m1_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m4x2(float *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg2ei16_v_f32m4x2(float *base, vuint16m2_t bindex, vfloat32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m1x2(double *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg2ei16_v_f64m1x2(double *base, vuint16mf4_t bindex, vfloat64m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m2x2(double *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg2ei16_v_f64m2x2(double *base, vuint16mf2_t bindex, vfloat64m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m4x2(double *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg2ei16_v_f64m4x2(double *base, vuint16m1_t bindex, vfloat64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8mf8x2(int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg2ei16_v_i8mf8x2(int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8mf4x2(int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg2ei16_v_i8mf4x2(int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8mf2x2(int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg2ei16_v_i8mf2x2(int8_t *base, vuint16m1_t bindex, vint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m1x2(int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg2ei16_v_i8m1x2(int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m2x2(int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg2ei16_v_i8m2x2(int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m4x2(int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg2ei16_v_i8m4x2(int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16mf4x2(int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg2ei16_v_i16mf4x2(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16mf2x2(int16_t *base, 
vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg2ei16_v_i16mf2x2(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m1x2(int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg2ei16_v_i16m1x2(int16_t *base, vuint16m1_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m2x2(int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg2ei16_v_i16m2x2(int16_t *base, vuint16m2_t bindex, vint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m4x2(int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg2ei16_v_i16m4x2(int16_t *base, vuint16m4_t bindex, vint16m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32mf2x2(int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void 
test_vsuxseg2ei16_v_i32mf2x2(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32m1x2(int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg2ei16_v_i32m1x2(int32_t *base, vuint16mf2_t bindex, vint32m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32m2x2(int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg2ei16_v_i32m2x2(int32_t *base, vuint16m1_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32m4x2(int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg2ei16_v_i32m4x2(int32_t *base, vuint16m2_t bindex, vint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i64m1x2(int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg2ei16_v_i64m1x2(int64_t *base, vuint16mf4_t bindex, vint64m1x2_ // CHECK-RV64-LABEL: define dso_local 
void @test_vsuxseg2ei16_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i64m2x2(int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg2ei16_v_i64m2x2(int64_t *base, vuint16mf2_t bindex, vint64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i64m4x2(int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg2ei16_v_i64m4x2(int64_t *base, vuint16m1_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf8x2(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg2ei16_v_u8mf8x2(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf4x2(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg2ei16_v_u8mf4x2(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf2x2(uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg2ei16_v_u8mf2x2(uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m1x2(uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg2ei16_v_u8m1x2(uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m2x2(uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg2ei16_v_u8m2x2(uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m4x2(uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg2ei16_v_u8m4x2(uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16mf4x2(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg2ei16_v_u16mf4x2(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16mf2x2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg2ei16_v_u16mf2x2(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m1x2(uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg2ei16_v_u16m1x2(uint16_t *base, vuint16m1_t bindex, vuint16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m2x2(uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg2ei16_v_u16m2x2(uint16_t *base, vuint16m2_t bindex, vuint16m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m4x2(uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg2ei16_v_u16m4x2(uint16_t *base, vuint16m4_t bindex, vuint16m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32mf2x2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg2ei16_v_u32mf2x2(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m1x2(uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg2ei16_v_u32m1x2(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m2x2(uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg2ei16_v_u32m2x2(uint32_t *base, vuint16m1_t bindex, vuint32m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m4x2(uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg2ei16_v_u32m4x2(uint32_t *base, vuint16m2_t bindex, vuint32m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u64m1x2(uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg2ei16_v_u64m1x2(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u64m2x2(uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg2ei16_v_u64m2x2(uint64_t *base, vuint16mf2_t bindex, vuint64m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u64m4x2(uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg2ei16_v_u64m4x2(uint64_t *base, vuint16m1_t bindex, vuint64m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg2ei16_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg2ei16_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg2ei16_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg2ei16_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg2ei16_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint16m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32mf2x2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg2ei16_v_f32mf2x2_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m1x2_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg2ei16_v_f32m1x2_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m2x2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg2ei16_v_f32m2x2_m(vbool16_t mask, float *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg2ei16_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m4x2_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg2ei16_v_f32m4x2_m(vbool8_t mask, float *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m1x2_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg2ei16_v_f64m1x2_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m2x2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg2ei16_v_f64m2x2_m(vbool32_t mask, double *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m4x2_m(vbool16_t mask, double 
*base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg2ei16_v_f64m4x2_m(vbool16_t mask, double *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg2ei16_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg2ei16_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg2ei16_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg2ei16_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg2ei16_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint16m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg2ei16_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint16m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg2ei16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg2ei16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg2ei16_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg2ei16_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg2ei16_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint16m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32mf2x2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg2ei16_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg2ei16_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsuxseg2ei16_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) 
{ @@ -760,7 +760,7 @@ void test_vsuxseg2ei16_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsuxseg2ei16_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsuxseg2ei16_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsuxseg2ei16_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsuxseg2ei16_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsuxseg2ei16_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsuxseg2ei16_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -830,7 +830,7 @@ void test_vsuxseg2ei16_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -840,7 +840,7 @@ void test_vsuxseg2ei16_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -850,7 +850,7 @@ void test_vsuxseg2ei16_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint16m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -860,7 +860,7 @@ void test_vsuxseg2ei16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -870,7 +870,7 @@ void test_vsuxseg2ei16_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -880,7 +880,7 @@ void test_vsuxseg2ei16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -890,7 +890,7 @@ void test_vsuxseg2ei16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -900,7 +900,7 @@ void test_vsuxseg2ei16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint16m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -910,7 +910,7 @@ void test_vsuxseg2ei16_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -920,7 +920,7 @@ void test_vsuxseg2ei16_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -930,7 +930,7 @@ void test_vsuxseg2ei16_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -940,7 +940,7 @@ void test_vsuxseg2ei16_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg2ei16_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -950,7 +950,7 @@ void test_vsuxseg2ei16_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -960,7 +960,7 @@ void test_vsuxseg2ei16_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei32.c index a0ccec5378293c..19a340be7b4bab 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16mf4x2(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg2ei32_v_f16mf4x2(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16mf2x2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg2ei32_v_f16mf2x2(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16m1x2(_Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg2ei32_v_f16m1x2(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16m2x2(_Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg2ei32_v_f16m2x2(_Float16 *base, vuint32m4_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16m4x2(_Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg2ei32_v_f16m4x2(_Float16 *base, vuint32m8_t bindex, vfloat16m4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32mf2x2(float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg2ei32_v_f32mf2x2(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m1x2(float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg2ei32_v_f32m1x2(float *base, vuint32m1_t bindex, vfloat32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m2x2(float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg2ei32_v_f32m2x2(float *base, vuint32m2_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m4x2(float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg2ei32_v_f32m4x2(float *base, vuint32m4_t bindex, vfloat32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f64m1x2(double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg2ei32_v_f64m1x2(double *base, vuint32mf2_t bindex, vfloat64m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f64m2x2(double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg2ei32_v_f64m2x2(double *base, vuint32m1_t bindex, vfloat64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f64m4x2(double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg2ei32_v_f64m4x2(double *base, vuint32m2_t bindex, vfloat64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf8x2(int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg2ei32_v_i8mf8x2(int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf4x2(int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg2ei32_v_i8mf4x2(int8_t *base, vuint32m1_t bindex, vint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf2x2(int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg2ei32_v_i8mf2x2(int8_t *base, vuint32m2_t bindex, vint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8m1x2(int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg2ei32_v_i8m1x2(int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8m2x2(int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg2ei32_v_i8m2x2(int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16mf4x2(int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg2ei32_v_i16mf4x2(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16mf2x2(int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg2ei32_v_i16mf2x2(int16_t *base, vuint32m1_t bindex, vint16mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m1x2(int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg2ei32_v_i16m1x2(int16_t *base, vuint32m2_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m2x2(int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg2ei32_v_i16m2x2(int16_t *base, vuint32m4_t bindex, vint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m4x2(int16_t *base, 
vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg2ei32_v_i16m4x2(int16_t *base, vuint32m8_t bindex, vint16m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32mf2x2(int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg2ei32_v_i32mf2x2(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32m1x2(int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg2ei32_v_i32m1x2(int32_t *base, vuint32m1_t bindex, vint32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32m2x2(int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg2ei32_v_i32m2x2(int32_t *base, vuint32m2_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32m4x2(int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void 
test_vsuxseg2ei32_v_i32m4x2(int32_t *base, vuint32m4_t bindex, vint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i64m1x2(int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg2ei32_v_i64m1x2(int64_t *base, vuint32mf2_t bindex, vint64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i64m2x2(int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg2ei32_v_i64m2x2(int64_t *base, vuint32m1_t bindex, vint64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i64m4x2(int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg2ei32_v_i64m4x2(int64_t *base, vuint32m2_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf8x2(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg2ei32_v_u8mf8x2(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2 // CHECK-RV64-LABEL: define dso_local 
void @test_vsuxseg2ei32_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf4x2(uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg2ei32_v_u8mf4x2(uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf2x2(uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg2ei32_v_u8mf2x2(uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8m1x2(uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg2ei32_v_u8m1x2(uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8m2x2(uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg2ei32_v_u8m2x2(uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16mf4x2(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg2ei32_v_u16mf4x2(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16mf2x2(uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg2ei32_v_u16mf2x2(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m1x2(uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg2ei32_v_u16m1x2(uint16_t *base, vuint32m2_t bindex, vuint16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m2x2(uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg2ei32_v_u16m2x2(uint16_t *base, vuint32m4_t bindex, vuint16m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m4x2(uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg2ei32_v_u16m4x2(uint16_t *base, vuint32m8_t bindex, vuint16m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32mf2x2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg2ei32_v_u32mf2x2(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32m1x2(uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg2ei32_v_u32m1x2(uint32_t *base, vuint32m1_t bindex, vuint32m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32m2x2(uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg2ei32_v_u32m2x2(uint32_t *base, vuint32m2_t bindex, vuint32m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32m4x2(uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg2ei32_v_u32m4x2(uint32_t *base, vuint32m4_t bindex, vuint32m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m1x2(uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg2ei32_v_u64m1x2(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m2x2(uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg2ei32_v_u64m2x2(uint64_t *base, vuint32m1_t bindex, vuint64m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m4x2(uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg2ei32_v_u64m4x2(uint64_t *base, vuint32m2_t bindex, vuint64m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg2ei32_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg2ei32_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg2ei32_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg2ei32_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m4x2_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg2ei32_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint32m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32mf2x2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg2ei32_v_f32mf2x2_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m1x2_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg2ei32_v_f32m1x2_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m2x2_m(vbool16_t mask, float *base, vuint32m2_t bindex, 
vfloat32m2x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg2ei32_v_f32m2x2_m(vbool16_t mask, float *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m4x2_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg2ei32_v_f32m4x2_m(vbool8_t mask, float *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f64m1x2_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg2ei32_v_f64m1x2_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f64m2x2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg2ei32_v_f64m2x2_m(vbool32_t mask, double *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f64m4x2_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg2ei32_v_f64m4x2_m(vbool16_t mask, double *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg2ei32_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg2ei32_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg2ei32_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg2ei32_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg2ei32_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint32m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg2ei32_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg2ei32_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m1x2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg2ei32_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg2ei32_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg2ei32_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint32m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t 
vl) { @@ -700,7 +700,7 @@ void test_vsuxseg2ei32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg2ei32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg2ei32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg2ei32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg2ei32_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsuxseg2ei32_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsuxseg2ei32_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsuxseg2ei32_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsuxseg2ei32_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsuxseg2ei32_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsuxseg2ei32_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsuxseg2ei32_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsuxseg2ei32_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -830,7 +830,7 @@ void test_vsuxseg2ei32_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -840,7 +840,7 @@ void test_vsuxseg2ei32_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -850,7 +850,7 @@ void test_vsuxseg2ei32_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bi // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -860,7 +860,7 @@ void test_vsuxseg2ei32_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint32m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -870,7 +870,7 @@ void test_vsuxseg2ei32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -880,7 +880,7 @@ void test_vsuxseg2ei32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg2ei32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -890,7 +890,7 @@ void test_vsuxseg2ei32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -900,7 +900,7 @@ void test_vsuxseg2ei32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -910,7 +910,7 @@ void test_vsuxseg2ei32_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -920,7 +920,7 @@ void test_vsuxseg2ei32_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei64.c index 16de15c3f4a539..7eaf7ad6b51125 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16mf4x2(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg2ei64_v_f16mf4x2(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16mf2x2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg2ei64_v_f16mf2x2(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16m1x2(_Float16 *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg2ei64_v_f16m1x2(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16m2x2(_Float16 *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg2ei64_v_f16m2x2(_Float16 *base, vuint64m8_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32mf2x2(float *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg2ei64_v_f32mf2x2(float *base, vuint64m1_t bindex, vfloat32mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32m1x2(float *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg2ei64_v_f32m1x2(float *base, vuint64m2_t bindex, vfloat32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32m2x2(float *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg2ei64_v_f32m2x2(float *base, vuint64m4_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32m4x2(float *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg2ei64_v_f32m4x2(float *base, vuint64m8_t bindex, vfloat32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m1x2(double *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg2ei64_v_f64m1x2(double *base, vuint64m1_t bindex, vfloat64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m2x2(double *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg2ei64_v_f64m2x2(double *base, vuint64m2_t bindex, vfloat64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m4x2(double *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg2ei64_v_f64m4x2(double *base, vuint64m4_t bindex, vfloat64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf8x2(int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg2ei64_v_i8mf8x2(int8_t *base, vuint64m1_t bindex, vint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf4x2(int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg2ei64_v_i8mf4x2(int8_t *base, vuint64m2_t bindex, vint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf2x2(int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg2ei64_v_i8mf2x2(int8_t *base, vuint64m4_t bindex, vint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8m1x2(int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg2ei64_v_i8m1x2(int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16mf4x2(int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg2ei64_v_i16mf4x2(int16_t *base, vuint64m1_t bindex, vint16mf4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16mf2x2(int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg2ei64_v_i16mf2x2(int16_t *base, vuint64m2_t bindex, vint16mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16m1x2(int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg2ei64_v_i16m1x2(int16_t *base, vuint64m4_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16m2x2(int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg2ei64_v_i16m2x2(int16_t *base, vuint64m8_t bindex, vint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32mf2x2(int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg2ei64_v_i32mf2x2(int32_t *base, vuint64m1_t bindex, vint32mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m1x2(int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg2ei64_v_i32m1x2(int32_t *base, vuint64m2_t bindex, vint32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m2x2(int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg2ei64_v_i32m2x2(int32_t *base, vuint64m4_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m4x2(int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg2ei64_v_i32m4x2(int32_t *base, vuint64m8_t bindex, vint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i64m1x2(int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg2ei64_v_i64m1x2(int64_t *base, vuint64m1_t bindex, vint64m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i64m2x2(int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg2ei64_v_i64m2x2(int64_t *base, vuint64m2_t bindex, vint64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i64m4x2(int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg2ei64_v_i64m4x2(int64_t *base, vuint64m4_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf8x2(uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg2ei64_v_u8mf8x2(uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf4x2(uint8_t 
*base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg2ei64_v_u8mf4x2(uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf2x2(uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg2ei64_v_u8mf2x2(uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8m1x2(uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg2ei64_v_u8m1x2(uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16mf4x2(uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg2ei64_v_u16mf4x2(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16mf2x2(uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void 
test_vsuxseg2ei64_v_u16mf2x2(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16m1x2(uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg2ei64_v_u16m1x2(uint16_t *base, vuint64m4_t bindex, vuint16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16m2x2(uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg2ei64_v_u16m2x2(uint16_t *base, vuint64m8_t bindex, vuint16m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32mf2x2(uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg2ei64_v_u32mf2x2(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m1x2(uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg2ei64_v_u32m1x2(uint32_t *base, vuint64m2_t bindex, vuint32m1x2 // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg2ei64_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m2x2(uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg2ei64_v_u32m2x2(uint32_t *base, vuint64m4_t bindex, vuint32m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m4x2(uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg2ei64_v_u32m4x2(uint32_t *base, vuint64m8_t bindex, vuint32m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u64m1x2(uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg2ei64_v_u64m1x2(uint64_t *base, vuint64m1_t bindex, vuint64m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u64m2x2(uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg2ei64_v_u64m2x2(uint64_t *base, vuint64m2_t bindex, vuint64m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u64m4x2(uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg2ei64_v_u64m4x2(uint64_t *base, vuint64m4_t bindex, vuint64m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg2ei64_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg2ei64_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg2ei64_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg2ei64_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32mf2x2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg2ei64_v_f32mf2x2_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32m1x2_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg2ei64_v_f32m1x2_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg2ei64_v_f32m2x2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg2ei64_v_f32m2x2_m(vbool16_t mask, float *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32m4x2_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg2ei64_v_f32m4x2_m(vbool8_t mask, float *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m1x2_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg2ei64_v_f64m1x2_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m2x2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg2ei64_v_f64m2x2_m(vbool32_t mask, double *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m4x2_m(vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg2ei64_v_f64m4x2_m(vbool16_t mask, double *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg2ei64_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg2ei64_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg2ei64_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg2ei64_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg2ei64_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg2ei64_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg2ei64_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m2x2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg2ei64_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg2ei64_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg2ei64_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) 
{ @@ -640,7 +640,7 @@ void test_vsuxseg2ei64_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg2ei64_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg2ei64_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg2ei64_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg2ei64_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg2ei64_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg2ei64_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg2ei64_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg2ei64_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg2ei64_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg2ei64_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsuxseg2ei64_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsuxseg2ei64_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsuxseg2ei64_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsuxseg2ei64_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsuxseg2ei64_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint64m4_t b // CHECK-RV64-LABEL: 
define dso_local void @test_vsuxseg2ei64_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsuxseg2ei64_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsuxseg2ei64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsuxseg2ei64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg2ei64_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei8.c index 8dc1346edffc81..5aca1f8017ec89 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16mf4x2(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg2ei8_v_f16mf4x2(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16mf2x2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg2ei8_v_f16mf2x2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m1x2(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg2ei8_v_f16m1x2(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m2x2(_Float16 *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg2ei8_v_f16m2x2(_Float16 *base, vuint8m1_t bindex, vfloat16m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m4x2(_Float16 *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg2ei8_v_f16m4x2(_Float16 *base, vuint8m2_t bindex, vfloat16m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32mf2x2(float *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg2ei8_v_f32mf2x2(float *base, vuint8mf8_t bindex, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32m1x2(float *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg2ei8_v_f32m1x2(float *base, vuint8mf4_t bindex, vfloat32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32m2x2(float *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg2ei8_v_f32m2x2(float *base, vuint8mf2_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32m4x2(float *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg2ei8_v_f32m4x2(float *base, vuint8m1_t bindex, vfloat32m4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m1x2(double *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg2ei8_v_f64m1x2(double *base, vuint8mf8_t bindex, vfloat64m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m2x2(double *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg2ei8_v_f64m2x2(double *base, vuint8mf4_t bindex, vfloat64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m4x2(double *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg2ei8_v_f64m4x2(double *base, vuint8mf2_t bindex, vfloat64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf8x2(int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg2ei8_v_i8mf8x2(int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf4x2(int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg2ei8_v_i8mf4x2(int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf2x2(int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg2ei8_v_i8mf2x2(int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m1x2(int8_t *base, vuint8m1_t bindex, vint8m1x2_t 
v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg2ei8_v_i8m1x2(int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m2x2(int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg2ei8_v_i8m2x2(int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m4x2(int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg2ei8_v_i8m4x2(int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16mf4x2(int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg2ei8_v_i16mf4x2(int16_t *base, vuint8mf8_t bindex, vint16mf4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16mf2x2(int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg2ei8_v_i16mf2x2(int16_t *base, vuint8mf4_t bindex, vint16mf2x2_ // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m1x2(int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg2ei8_v_i16m1x2(int16_t *base, vuint8mf2_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m2x2(int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg2ei8_v_i16m2x2(int16_t *base, vuint8m1_t bindex, vint16m2x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m4x2(int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg2ei8_v_i16m4x2(int16_t *base, vuint8m2_t bindex, vint16m4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32mf2x2(int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg2ei8_v_i32mf2x2(int32_t *base, vuint8mf8_t bindex, vint32mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m1x2(int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg2ei8_v_i32m1x2(int32_t *base, vuint8mf4_t bindex, vint32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m2x2(int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg2ei8_v_i32m2x2(int32_t *base, vuint8mf2_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m4x2(int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg2ei8_v_i32m4x2(int32_t *base, vuint8m1_t bindex, vint32m4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m1x2(int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg2ei8_v_i64m1x2(int64_t *base, vuint8mf8_t bindex, vint64m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m2x2(int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg2ei8_v_i64m2x2(int64_t *base, vuint8mf4_t bindex, vint64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m4x2(int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg2ei8_v_i64m4x2(int64_t *base, vuint8mf2_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf8x2(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg2ei8_v_u8mf8x2(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf4x2(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg2ei8_v_u8mf4x2(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf2x2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg2ei8_v_u8mf2x2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m1x2(uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg2ei8_v_u8m1x2(uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m2x2(uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg2ei8_v_u8m2x2(uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m4x2(uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg2ei8_v_u8m4x2(uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16mf4x2(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg2ei8_v_u16mf4x2(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16mf2x2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg2ei8_v_u16mf2x2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16m1x2(uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg2ei8_v_u16m1x2(uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16m2x2(uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg2ei8_v_u16m2x2(uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16m4x2(uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg2ei8_v_u16m4x2(uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32mf2x2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg2ei8_v_u32mf2x2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32m1x2(uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg2ei8_v_u32m1x2(uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32m2x2(uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg2ei8_v_u32m2x2(uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32m4x2(uint32_t *base, 
vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg2ei8_v_u32m4x2(uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u64m1x2(uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg2ei8_v_u64m1x2(uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u64m2x2(uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg2ei8_v_u64m2x2(uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u64m4x2(uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg2ei8_v_u64m4x2(uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, 
size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg2ei8_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg2ei8_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg2ei8_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg2ei8_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg2ei8_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint8m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32mf2x2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg2ei8_v_f32mf2x2_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32m1x2_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg2ei8_v_f32m1x2_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32m2x2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg2ei8_v_f32m2x2_m(vbool16_t mask, float *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32m4x2_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg2ei8_v_f32m4x2_m(vbool8_t mask, float *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m1x2_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg2ei8_v_f64m1x2_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m2x2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg2ei8_v_f64m2x2_m(vbool32_t mask, double *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m4x2_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg2ei8_v_f64m4x2_m(vbool16_t mask, double *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg2ei8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg2ei8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg2ei8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg2ei8_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg2ei8_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg2ei8_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg2ei8_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg2ei8_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint8mf4_t 
bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg2ei8_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg2ei8_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg2ei8_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg2ei8_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint8m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg2ei8_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg2ei8_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsuxseg2ei8_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsuxseg2ei8_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsuxseg2ei8_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsuxseg2ei8_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsuxseg2ei8_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsuxseg2ei8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsuxseg2ei8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsuxseg2ei8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -830,7 +830,7 @@ void test_vsuxseg2ei8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -840,7 +840,7 @@ void 
test_vsuxseg2ei8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -850,7 +850,7 @@ void test_vsuxseg2ei8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -860,7 +860,7 @@ void test_vsuxseg2ei8_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -870,7 +870,7 @@ void test_vsuxseg2ei8_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -880,7 +880,7 @@ void test_vsuxseg2ei8_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -890,7 +890,7 @@ void test_vsuxseg2ei8_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -900,7 +900,7 @@ void test_vsuxseg2ei8_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint8m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -910,7 +910,7 @@ void test_vsuxseg2ei8_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -920,7 +920,7 @@ void test_vsuxseg2ei8_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -930,7 +930,7 @@ void test_vsuxseg2ei8_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -940,7 +940,7 @@ void test_vsuxseg2ei8_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -950,7 +950,7 @@ void test_vsuxseg2ei8_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -960,7 +960,7 @@ void test_vsuxseg2ei8_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei16.c index f9b76824236b39..03a05e3949cd39 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16mf4x3(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg3ei16_v_f16mf4x3(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16mf2x3(_Float16 *base, 
vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg3ei16_v_f16mf2x3(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16m1x3(_Float16 *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg3ei16_v_f16m1x3(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16m2x3(_Float16 *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg3ei16_v_f16m2x3(_Float16 *base, vuint16m2_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32mf2x3(float *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg3ei16_v_f32mf2x3(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32m1x3(float *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg3ei16_v_f32m1x3(float 
*base, vuint16mf2_t bindex, vfloat32m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32m2x3(float *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg3ei16_v_f32m2x3(float *base, vuint16m1_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f64m1x3(double *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg3ei16_v_f64m1x3(double *base, vuint16mf4_t bindex, vfloat64m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f64m2x3(double *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg3ei16_v_f64m2x3(double *base, vuint16mf2_t bindex, vfloat64m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf8x3(int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg3ei16_v_i8mf8x3(int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf4x3 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf4x3(int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg3ei16_v_i8mf4x3(int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf2x3(int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg3ei16_v_i8mf2x3(int8_t *base, vuint16m1_t bindex, vint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8m1x3(int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg3ei16_v_i8m1x3(int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8m2x3(int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg3ei16_v_i8m2x3(int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16mf4x3(int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg3ei16_v_i16mf4x3(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16mf2x3(int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg3ei16_v_i16mf2x3(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16m1x3(int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg3ei16_v_i16m1x3(int16_t *base, vuint16m1_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16m2x3(int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg3ei16_v_i16m2x3(int16_t *base, vuint16m2_t bindex, vint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i32mf2x3(int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg3ei16_v_i32mf2x3(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i32m1x3(int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg3ei16_v_i32m1x3(int32_t *base, vuint16mf2_t bindex, vint32m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i32m2x3(int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg3ei16_v_i32m2x3(int32_t *base, vuint16m1_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i64m1x3(int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg3ei16_v_i64m1x3(int64_t *base, vuint16mf4_t bindex, vint64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i64m2x3(int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg3ei16_v_i64m2x3(int64_t *base, vuint16mf2_t bindex, vint64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf8x3(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg3ei16_v_u8mf8x3(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf4x3(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg3ei16_v_u8mf4x3(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf2x3(uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg3ei16_v_u8mf2x3(uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8m1x3(uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg3ei16_v_u8m1x3(uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8m2x3(uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg3ei16_v_u8m2x3(uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16mf4x3(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg3ei16_v_u16mf4x3(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16mf2x3(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg3ei16_v_u16mf2x3(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16m1x3(uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg3ei16_v_u16m1x3(uint16_t *base, vuint16m1_t bindex, vuint16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16m2x3(uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg3ei16_v_u16m2x3(uint16_t *base, vuint16m2_t bindex, vuint16m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u32mf2x3(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg3ei16_v_u32mf2x3(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u32m1x3(uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg3ei16_v_u32m1x3(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg3ei16_v_u32m2x3(uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg3ei16_v_u32m2x3(uint32_t *base, vuint16m1_t bindex, vuint32m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u64m1x3(uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg3ei16_v_u64m1x3(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u64m2x3(uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg3ei16_v_u64m2x3(uint64_t *base, vuint16mf2_t bindex, vuint64m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg3ei16_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) 
// CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg3ei16_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg3ei16_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg3ei16_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32mf2x3_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg3ei16_v_f32mf2x3_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 
0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32m1x3_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg3ei16_v_f32m1x3_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32m2x3_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg3ei16_v_f32m2x3_m(vbool16_t mask, float *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f64m1x3_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg3ei16_v_f64m1x3_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f64m2x3_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg3ei16_v_f64m2x3_m(vbool32_t mask, double *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg3ei16_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg3ei16_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg3ei16_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg3ei16_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m2x3_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg3ei16_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint16m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg3ei16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg3ei16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, 
vint16m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg3ei16_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg3ei16_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg3ei16_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg3ei16_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg3ei16_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg3ei16_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg3ei16_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg3ei16_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg3ei16_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg3ei16_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg3ei16_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg3ei16_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf4x3_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg3ei16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg3ei16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg3ei16_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, 
size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg3ei16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg3ei16_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg3ei16_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg3ei16_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg3ei16_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei32.c index eb7e7162210e6b..3540a9b5ec1c71 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16mf4x3(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg3ei32_v_f16mf4x3(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16mf2x3(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg3ei32_v_f16mf2x3(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16m1x3(_Float16 *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg3ei32_v_f16m1x3(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16m2x3(_Float16 *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg3ei32_v_f16m2x3(_Float16 *base, vuint32m4_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f32mf2x3(float *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg3ei32_v_f32mf2x3(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f32m1x3(float *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg3ei32_v_f32m1x3(float *base, vuint32m1_t bindex, vfloat32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f32m2x3(float *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg3ei32_v_f32m2x3(float *base, vuint32m2_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f64m1x3(double *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg3ei32_v_f64m1x3(double *base, vuint32mf2_t bindex, vfloat64m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f64m2x3(double *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg3ei32_v_f64m2x3(double *base, vuint32m1_t bindex, vfloat64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf8x3(int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg3ei32_v_i8mf8x3(int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf4x3(int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg3ei32_v_i8mf4x3(int8_t *base, vuint32m1_t bindex, vint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf2x3(int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg3ei32_v_i8mf2x3(int8_t *base, vuint32m2_t bindex, vint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8m1x3(int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg3ei32_v_i8m1x3(int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8m2x3(int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg3ei32_v_i8m2x3(int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16mf4x3(int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg3ei32_v_i16mf4x3(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16mf2x3(int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg3ei32_v_i16mf2x3(int16_t *base, vuint32m1_t bindex, vint16mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16m1x3(int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg3ei32_v_i16m1x3(int16_t *base, vuint32m2_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16m2x3(int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg3ei32_v_i16m2x3(int16_t *base, vuint32m4_t bindex, vint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32mf2x3(int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg3ei32_v_i32mf2x3(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32m1x3(int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg3ei32_v_i32m1x3(int32_t *base, vuint32m1_t bindex, vint32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32m2x3(int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg3ei32_v_i32m2x3(int32_t *base, vuint32m2_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i64m1x3(int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg3ei32_v_i64m1x3(int64_t *base, vuint32mf2_t bindex, vint64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i64m2x3(int64_t 
*base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg3ei32_v_i64m2x3(int64_t *base, vuint32m1_t bindex, vint64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf8x3(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg3ei32_v_u8mf8x3(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf4x3(uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg3ei32_v_u8mf4x3(uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf2x3(uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg3ei32_v_u8mf2x3(uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8m1x3(uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void 
test_vsuxseg3ei32_v_u8m1x3(uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8m2x3(uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg3ei32_v_u8m2x3(uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16mf4x3(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg3ei32_v_u16mf4x3(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16mf2x3(uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg3ei32_v_u16mf2x3(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16m1x3(uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg3ei32_v_u16m1x3(uint16_t *base, vuint32m2_t bindex, vuint16m1x3 // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg3ei32_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16m2x3(uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg3ei32_v_u16m2x3(uint16_t *base, vuint32m4_t bindex, vuint16m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32mf2x3(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg3ei32_v_u32mf2x3(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32m1x3(uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg3ei32_v_u32m1x3(uint32_t *base, vuint32m1_t bindex, vuint32m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32m2x3(uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg3ei32_v_u32m2x3(uint32_t *base, vuint32m2_t bindex, vuint32m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u64m1x3(uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg3ei32_v_u64m1x3(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u64m2x3(uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg3ei32_v_u64m2x3(uint64_t *base, vuint32m1_t bindex, vuint64m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg3ei32_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg3ei32_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m1x3_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg3ei32_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg3ei32_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f32mf2x3_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg3ei32_v_f32mf2x3_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f32m1x3_m(vbool32_t mask, float *base, vuint32m1_t bindex, 
vfloat32m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg3ei32_v_f32m1x3_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f32m2x3_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg3ei32_v_f32m2x3_m(vbool16_t mask, float *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f64m1x3_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg3ei32_v_f64m1x3_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f64m2x3_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg3ei32_v_f64m2x3_m(vbool32_t mask, double *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg3ei32_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg3ei32_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg3ei32_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg3ei32_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg3ei32_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint32m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg3ei32_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg3ei32_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg3ei32_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m2x3_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg3ei32_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg3ei32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg3ei32_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) 
{ @@ -590,7 +590,7 @@ void test_vsuxseg3ei32_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg3ei32_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg3ei32_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg3ei32_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg3ei32_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg3ei32_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg3ei32_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg3ei32_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg3ei32_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg3ei32_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg3ei32_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg3ei32_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg3ei32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg3ei32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg3ei32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg3ei32_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint32mf2_t // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei64.c index d2d71226d07838..213350d2856682 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16mf4x3(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg3ei64_v_f16mf4x3(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16mf2x3(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg3ei64_v_f16mf2x3(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16m1x3(_Float16 *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg3ei64_v_f16m1x3(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16m2x3(_Float16 *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg3ei64_v_f16m2x3(_Float16 *base, vuint64m8_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f32mf2x3(float *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg3ei64_v_f32mf2x3(float *base, vuint64m1_t bindex, vfloat32mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f32m1x3(float *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg3ei64_v_f32m1x3(float *base, vuint64m2_t bindex, vfloat32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f32m2x3(float *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg3ei64_v_f32m2x3(float *base, vuint64m4_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f64m1x3(double *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg3ei64_v_f64m1x3(double *base, vuint64m1_t bindex, vfloat64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f64m2x3(double *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg3ei64_v_f64m2x3(double *base, vuint64m2_t bindex, vfloat64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf8x3(int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg3ei64_v_i8mf8x3(int8_t *base, vuint64m1_t bindex, vint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf4x3(int8_t *base, 
vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg3ei64_v_i8mf4x3(int8_t *base, vuint64m2_t bindex, vint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf2x3(int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg3ei64_v_i8mf2x3(int8_t *base, vuint64m4_t bindex, vint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8m1x3(int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg3ei64_v_i8m1x3(int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16mf4x3(int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg3ei64_v_i16mf4x3(int16_t *base, vuint64m1_t bindex, vint16mf4x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16mf2x3(int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg3ei64_v_i16mf2x3(int16_t *base, 
vuint64m2_t bindex, vint16mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16m1x3(int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg3ei64_v_i16m1x3(int16_t *base, vuint64m4_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16m2x3(int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg3ei64_v_i16m2x3(int16_t *base, vuint64m8_t bindex, vint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32mf2x3(int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg3ei64_v_i32mf2x3(int32_t *base, vuint64m1_t bindex, vint32mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32m1x3(int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg3ei64_v_i32m1x3(int32_t *base, vuint64m2_t bindex, vint32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m2x3 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32m2x3(int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg3ei64_v_i32m2x3(int32_t *base, vuint64m4_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i64m1x3(int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg3ei64_v_i64m1x3(int64_t *base, vuint64m1_t bindex, vint64m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i64m2x3(int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg3ei64_v_i64m2x3(int64_t *base, vuint64m2_t bindex, vint64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf8x3(uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg3ei64_v_u8mf8x3(uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf4x3(uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg3ei64_v_u8mf4x3(uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf2x3(uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg3ei64_v_u8mf2x3(uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8m1x3(uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg3ei64_v_u8m1x3(uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16mf4x3(uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg3ei64_v_u16mf4x3(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16mf2x3(uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg3ei64_v_u16mf2x3(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16m1x3(uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg3ei64_v_u16m1x3(uint16_t *base, vuint64m4_t bindex, vuint16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16m2x3(uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg3ei64_v_u16m2x3(uint16_t *base, vuint64m8_t bindex, vuint16m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32mf2x3(uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg3ei64_v_u32mf2x3(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32m1x3(uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg3ei64_v_u32m1x3(uint32_t *base, vuint64m2_t bindex, vuint32m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32m2x3(uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg3ei64_v_u32m2x3(uint32_t *base, vuint64m4_t bindex, vuint32m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u64m1x3(uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg3ei64_v_u64m1x3(uint64_t *base, vuint64m1_t bindex, vuint64m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u64m2x3(uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg3ei64_v_u64m2x3(uint64_t *base, vuint64m2_t bindex, vuint64m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg3ei64_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg3ei64_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg3ei64_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg3ei64_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f32mf2x3_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg3ei64_v_f32mf2x3_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f32m1x3_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg3ei64_v_f32m1x3_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f32m2x3_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg3ei64_v_f32m2x3_m(vbool16_t mask, float *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f64m1x3_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg3ei64_v_f64m1x3_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg3ei64_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f64m2x3_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg3ei64_v_f64m2x3_m(vbool32_t mask, double *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg3ei64_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg3ei64_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf2x3_m(vbool16_t mask, int8_t *base, 
vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg3ei64_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg3ei64_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg3ei64_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg3ei64_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg3ei64_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg3ei64_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg3ei64_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg3ei64_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg3ei64_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg3ei64_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg3ei64_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg3ei64_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf4x3_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg3ei64_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg3ei64_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg3ei64_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { 
@@ -630,7 +630,7 @@ void test_vsuxseg3ei64_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg3ei64_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg3ei64_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg3ei64_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg3ei64_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg3ei64_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg3ei64_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg3ei64_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei64_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei8.c
index 7f66f3511758b2..5ecdd65c29e6d0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei8.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16mf4x3
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_f16mf4x3(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) {
@@ -20,7 +20,7 @@ void test_vsuxseg3ei8_v_f16mf4x3(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16mf2x3
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_f16mf2x3(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) {
@@ -30,7 +30,7 @@ void test_vsuxseg3ei8_v_f16mf2x3(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m1x3
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei8_v_f16m1x3(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) {
@@ -40,7 +40,7 @@ void test_vsuxseg3ei8_v_f16m1x3(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x3
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m2x3
// CHECK-RV64-SAME: (ptr
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16m2x3(_Float16 *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg3ei8_v_f16m2x3(_Float16 *base, vuint8m1_t bindex, vfloat16m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f32mf2x3(float *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg3ei8_v_f32mf2x3(float *base, vuint8mf8_t bindex, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f32m1x3(float *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg3ei8_v_f32m1x3(float *base, vuint8mf4_t bindex, vfloat32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f32m2x3(float *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg3ei8_v_f32m2x3(float *base, vuint8mf2_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f64m1x3(double *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg3ei8_v_f64m1x3(double *base, vuint8mf8_t bindex, vfloat64m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f64m2x3(double *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg3ei8_v_f64m2x3(double *base, vuint8mf4_t bindex, vfloat64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf8x3(int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg3ei8_v_i8mf8x3(int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf4x3(int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg3ei8_v_i8mf4x3(int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf2x3(int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg3ei8_v_i8mf2x3(int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8m1x3(int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg3ei8_v_i8m1x3(int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8m2x3(int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg3ei8_v_i8m2x3(int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16mf4x3(int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg3ei8_v_i16mf4x3(int16_t *base, vuint8mf8_t bindex, vint16mf4x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16mf2x3(int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg3ei8_v_i16mf2x3(int16_t *base, vuint8mf4_t bindex, vint16mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16m1x3(int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg3ei8_v_i16m1x3(int16_t *base, vuint8mf2_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16m2x3(int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg3ei8_v_i16m2x3(int16_t *base, vuint8m1_t bindex, vint16m2x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32mf2x3(int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg3ei8_v_i32mf2x3(int32_t *base, vuint8mf8_t bindex, vint32mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32m1x3(int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg3ei8_v_i32m1x3(int32_t *base, vuint8mf4_t bindex, vint32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32m2x3(int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg3ei8_v_i32m2x3(int32_t *base, vuint8mf2_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i64m1x3(int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg3ei8_v_i64m1x3(int64_t *base, vuint8mf8_t bindex, vint64m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i64m2x3(int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg3ei8_v_i64m2x3(int64_t *base, vuint8mf4_t bindex, vint64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8mf8x3(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg3ei8_v_u8mf8x3(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8mf4x3(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg3ei8_v_u8mf4x3(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8mf2x3(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg3ei8_v_u8mf2x3(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8m1x3(uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg3ei8_v_u8m1x3(uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8m2x3(uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, 
size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg3ei8_v_u8m2x3(uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16mf4x3(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg3ei8_v_u16mf4x3(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16mf2x3(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg3ei8_v_u16mf2x3(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16m1x3(uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg3ei8_v_u16m1x3(uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16m2x3(uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg3ei8_v_u16m2x3(uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32mf2x3(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg3ei8_v_u32mf2x3(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32m1x3(uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg3ei8_v_u32m1x3(uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32m2x3(uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg3ei8_v_u32m2x3(uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u64m1x3(uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg3ei8_v_u64m1x3(uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u64m2x3(uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg3ei8_v_u64m2x3(uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg3ei8_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg3ei8_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg3ei8_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi 
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg3ei8_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f32mf2x3_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg3ei8_v_f32mf2x3_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f32m1x3_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg3ei8_v_f32m1x3_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg3ei8_v_f32m2x3_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg3ei8_v_f32m2x3_m(vbool16_t mask, float *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f64m1x3_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg3ei8_v_f64m1x3_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f64m2x3_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg3ei8_v_f64m2x3_m(vbool32_t mask, double *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg3ei8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg3ei8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg3ei8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg3ei8_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg3ei8_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg3ei8_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg3ei8_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg3ei8_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg3ei8_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg3ei8_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg3ei8_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg3ei8_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void 
test_vsuxseg3ei8_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg3ei8_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg3ei8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg3ei8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], 
i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg3ei8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg3ei8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg3ei8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg3ei8_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg3ei8_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg3ei8_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg3ei8_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg3ei8_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg3ei8_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg3ei8_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg3ei8_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei16.c index 7689c51f46aa05..b9132b9273dd30 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16mf4x4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg4ei16_v_f16mf4x4(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16mf2x4(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg4ei16_v_f16mf2x4(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16m1x4(_Float16 *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg4ei16_v_f16m1x4(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16m2x4(_Float16 *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg4ei16_v_f16m2x4(_Float16 *base, vuint16m2_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f32mf2x4(float *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg4ei16_v_f32mf2x4(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f32m1x4(float *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg4ei16_v_f32m1x4(float *base, vuint16mf2_t bindex, vfloat32m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f32m2x4(float *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg4ei16_v_f32m2x4(float *base, vuint16m1_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f64m1x4(double *base, 
vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg4ei16_v_f64m1x4(double *base, vuint16mf4_t bindex, vfloat64m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f64m2x4(double *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg4ei16_v_f64m2x4(double *base, vuint16mf2_t bindex, vfloat64m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8mf8x4(int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg4ei16_v_i8mf8x4(int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8mf4x4(int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg4ei16_v_i8mf4x4(int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8mf2x4(int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg4ei16_v_i8mf2x4(int8_t 
*base, vuint16m1_t bindex, vint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8m1x4(int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg4ei16_v_i8m1x4(int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8m2x4(int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg4ei16_v_i8m2x4(int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16mf4x4(int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg4ei16_v_i16mf4x4(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16mf2x4(int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg4ei16_v_i16mf2x4(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m1x4 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16m1x4(int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg4ei16_v_i16m1x4(int16_t *base, vuint16m1_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16m2x4(int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg4ei16_v_i16m2x4(int16_t *base, vuint16m2_t bindex, vint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32mf2x4(int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg4ei16_v_i32mf2x4(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32m1x4(int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg4ei16_v_i32m1x4(int32_t *base, vuint16mf2_t bindex, vint32m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32m2x4(int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg4ei16_v_i32m2x4(int32_t *base, vuint16m1_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i64m1x4(int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg4ei16_v_i64m1x4(int64_t *base, vuint16mf4_t bindex, vint64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i64m2x4(int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg4ei16_v_i64m2x4(int64_t *base, vuint16mf2_t bindex, vint64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf8x4(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg4ei16_v_u8mf8x4(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf4x4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg4ei16_v_u8mf4x4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf2x4(uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg4ei16_v_u8mf2x4(uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8m1x4(uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg4ei16_v_u8m1x4(uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8m2x4(uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg4ei16_v_u8m2x4(uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16mf4x4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg4ei16_v_u16mf4x4(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16mf2x4(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg4ei16_v_u16mf2x4(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16m1x4(uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg4ei16_v_u16m1x4(uint16_t *base, vuint16m1_t bindex, vuint16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16m2x4(uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg4ei16_v_u16m2x4(uint16_t *base, vuint16m2_t bindex, vuint16m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u32mf2x4(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg4ei16_v_u32mf2x4(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u32m1x4(uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg4ei16_v_u32m1x4(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u32m2x4(uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg4ei16_v_u32m2x4(uint32_t *base, vuint16m1_t bindex, vuint32m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u64m1x4(uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg4ei16_v_u64m1x4(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u64m2x4(uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg4ei16_v_u64m2x4(uint64_t *base, vuint16mf2_t bindex, vuint64m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg4ei16_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg4ei16_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg4ei16_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg4ei16_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f32mf2x4_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg4ei16_v_f32mf2x4_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f32m1x4_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg4ei16_v_f32m1x4_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f32m2x4_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg4ei16_v_f32m2x4_m(vbool16_t mask, float *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f64m1x4_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg4ei16_v_f64m1x4_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f64m2x4_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg4ei16_v_f64m2x4_m(vbool32_t mask, double *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg4ei16_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg4ei16_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg4ei16_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg4ei16_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg4ei16_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint16m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg4ei16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg4ei16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg4ei16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg4ei16_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg4ei16_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg4ei16_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg4ei16_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg4ei16_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg4ei16_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg4ei16_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg4ei16_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg4ei16_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg4ei16_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m1x4_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg4ei16_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg4ei16_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg4ei16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, 
size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg4ei16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg4ei16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg4ei16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg4ei16_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg4ei16_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg4ei16_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg4ei16_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei32.c index 2bef098811a915..5910e50273f6bd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local 
void @test_vsuxseg4ei32_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16mf4x4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg4ei32_v_f16mf4x4(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16mf2x4(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg4ei32_v_f16mf2x4(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16m1x4(_Float16 *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg4ei32_v_f16m1x4(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16m2x4(_Float16 *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg4ei32_v_f16m2x4(_Float16 *base, vuint32m4_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32mf2x4(float *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg4ei32_v_f32mf2x4(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32m1x4(float *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg4ei32_v_f32m1x4(float *base, vuint32m1_t bindex, vfloat32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32m2x4(float *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg4ei32_v_f32m2x4(float *base, vuint32m2_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f64m1x4(double *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg4ei32_v_f64m1x4(double *base, vuint32mf2_t bindex, vfloat64m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f64m2x4(double *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg4ei32_v_f64m2x4(double *base, vuint32m1_t bindex, vfloat64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf8x4(int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg4ei32_v_i8mf8x4(int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf4x4(int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg4ei32_v_i8mf4x4(int8_t *base, vuint32m1_t bindex, vint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf2x4(int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg4ei32_v_i8mf2x4(int8_t *base, vuint32m2_t bindex, vint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8m1x4(int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg4ei32_v_i8m1x4(int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8m2x4(int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg4ei32_v_i8m2x4(int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16mf4x4(int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg4ei32_v_i16mf4x4(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16mf2x4(int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg4ei32_v_i16mf2x4(int16_t *base, vuint32m1_t bindex, vint16mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16m1x4(int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg4ei32_v_i16m1x4(int16_t *base, vuint32m2_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16m2x4(int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg4ei32_v_i16m2x4(int16_t *base, vuint32m4_t bindex, vint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32mf2x4(int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg4ei32_v_i32mf2x4(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32m1x4(int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg4ei32_v_i32m1x4(int32_t *base, vuint32m1_t bindex, vint32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32m2x4(int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg4ei32_v_i32m2x4(int32_t *base, vuint32m2_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i64m1x4(int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg4ei32_v_i64m1x4(int64_t *base, vuint32mf2_t bindex, vint64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i64m2x4(int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg4ei32_v_i64m2x4(int64_t *base, vuint32m1_t bindex, vint64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf8x4(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg4ei32_v_u8mf8x4(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf4x4(uint8_t 
*base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg4ei32_v_u8mf4x4(uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf2x4(uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg4ei32_v_u8mf2x4(uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8m1x4(uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg4ei32_v_u8m1x4(uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8m2x4(uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg4ei32_v_u8m2x4(uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16mf4x4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void 
test_vsuxseg4ei32_v_u16mf4x4(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16mf2x4(uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg4ei32_v_u16mf2x4(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16m1x4(uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg4ei32_v_u16m1x4(uint16_t *base, vuint32m2_t bindex, vuint16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16m2x4(uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg4ei32_v_u16m2x4(uint16_t *base, vuint32m4_t bindex, vuint16m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u32mf2x4(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg4ei32_v_u32mf2x4(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg4ei32_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u32m1x4(uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg4ei32_v_u32m1x4(uint32_t *base, vuint32m1_t bindex, vuint32m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u32m2x4(uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg4ei32_v_u32m2x4(uint32_t *base, vuint32m2_t bindex, vuint32m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u64m1x4(uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg4ei32_v_u64m1x4(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u64m2x4(uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg4ei32_v_u64m2x4(uint64_t *base, vuint32m1_t bindex, vuint64m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg4ei32_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg4ei32_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg4ei32_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void 
test_vsuxseg4ei32_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32mf2x4_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg4ei32_v_f32mf2x4_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32m1x4_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg4ei32_v_f32m1x4_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32m2x4_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg4ei32_v_f32m2x4_m(vbool16_t mask, float *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f64m1x4_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg4ei32_v_f64m1x4_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f64m2x4_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg4ei32_v_f64m2x4_m(vbool32_t mask, double *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg4ei32_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg4ei32_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg4ei32_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg4ei32_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg4ei32_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint32m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg4ei32_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg4ei32_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg4ei32_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg4ei32_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg4ei32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg4ei32_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg4ei32_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg4ei32_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg4ei32_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i64m2x4_m(vbool32_t mask, int64_t *base, 
vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg4ei32_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg4ei32_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg4ei32_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg4ei32_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg4ei32_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg4ei32_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg4ei32_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg4ei32_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg4ei32_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg4ei32_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg4ei32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg4ei32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m2x4_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg4ei32_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg4ei32_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei64.c index 27129b8f0ad29e..40dfc6d58bfe1d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 
0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16mf4x4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg4ei64_v_f16mf4x4(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16mf2x4(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg4ei64_v_f16mf2x4(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16m1x4(_Float16 *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg4ei64_v_f16m1x4(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16m2x4(_Float16 *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg4ei64_v_f16m2x4(_Float16 *base, vuint64m8_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f32mf2x4(float *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg4ei64_v_f32mf2x4(float *base, vuint64m1_t bindex, vfloat32mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f32m1x4(float *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg4ei64_v_f32m1x4(float *base, vuint64m2_t bindex, vfloat32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f32m2x4(float *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg4ei64_v_f32m2x4(float *base, vuint64m4_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f64m1x4(double *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg4ei64_v_f64m1x4(double *base, vuint64m1_t bindex, vfloat64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f64m2x4(double *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg4ei64_v_f64m2x4(double *base, vuint64m2_t bindex, vfloat64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8mf8x4(int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg4ei64_v_i8mf8x4(int8_t *base, vuint64m1_t bindex, vint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8mf4x4(int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg4ei64_v_i8mf4x4(int8_t *base, vuint64m2_t bindex, vint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8mf2x4(int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg4ei64_v_i8mf2x4(int8_t *base, vuint64m4_t bindex, vint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8m1x4(int8_t *base, vuint64m8_t bindex, 
vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg4ei64_v_i8m1x4(int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16mf4x4(int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg4ei64_v_i16mf4x4(int16_t *base, vuint64m1_t bindex, vint16mf4x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16mf2x4(int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg4ei64_v_i16mf2x4(int16_t *base, vuint64m2_t bindex, vint16mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16m1x4(int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg4ei64_v_i16m1x4(int16_t *base, vuint64m4_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16m2x4(int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg4ei64_v_i16m2x4(int16_t *base, vuint64m8_t 
bindex, vint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i32mf2x4(int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg4ei64_v_i32mf2x4(int32_t *base, vuint64m1_t bindex, vint32mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i32m1x4(int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg4ei64_v_i32m1x4(int32_t *base, vuint64m2_t bindex, vint32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i32m2x4(int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg4ei64_v_i32m2x4(int32_t *base, vuint64m4_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i64m1x4(int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg4ei64_v_i64m1x4(int64_t *base, vuint64m1_t bindex, vint64m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m2x4 // CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i64m2x4(int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg4ei64_v_i64m2x4(int64_t *base, vuint64m2_t bindex, vint64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8mf8x4(uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg4ei64_v_u8mf8x4(uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8mf4x4(uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg4ei64_v_u8mf4x4(uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8mf2x4(uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg4ei64_v_u8mf2x4(uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8m1x4(uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg4ei64_v_u8m1x4(uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16mf4x4(uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg4ei64_v_u16mf4x4(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16mf2x4(uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg4ei64_v_u16mf2x4(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16m1x4(uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg4ei64_v_u16m1x4(uint16_t *base, vuint64m4_t bindex, vuint16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16m2x4(uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg4ei64_v_u16m2x4(uint16_t *base, vuint64m8_t bindex, vuint16m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32mf2x4(uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg4ei64_v_u32mf2x4(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32m1x4(uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg4ei64_v_u32m1x4(uint32_t *base, vuint64m2_t bindex, vuint32m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32m2x4(uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg4ei64_v_u32m2x4(uint32_t *base, vuint64m4_t bindex, vuint32m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u64m1x4(uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg4ei64_v_u64m1x4(uint64_t *base, vuint64m1_t bindex, vuint64m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u64m2x4(uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg4ei64_v_u64m2x4(uint64_t *base, vuint64m2_t bindex, vuint64m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg4ei64_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg4ei64_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg4ei64_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg4ei64_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f32mf2x4_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg4ei64_v_f32mf2x4_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f32m1x4_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg4ei64_v_f32m1x4_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m2x4_m // CHECK-RV64-SAME: 
( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f32m2x4_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg4ei64_v_f32m2x4_m(vbool16_t mask, float *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f64m1x4_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg4ei64_v_f64m1x4_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f64m2x4_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg4ei64_v_f64m2x4_m(vbool32_t mask, double *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t 
vl) { @@ -460,7 +460,7 @@ void test_vsuxseg4ei64_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg4ei64_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg4ei64_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg4ei64_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg4ei64_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg4ei64_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg4ei64_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg4ei64_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg4ei64_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg4ei64_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg4ei64_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg4ei64_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg4ei64_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg4ei64_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg4ei64_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg4ei64_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg4ei64_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg4ei64_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg4ei64_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg4ei64_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16m1x4_m(vbool16_t mask, 
uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg4ei64_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg4ei64_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg4ei64_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg4ei64_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg4ei64_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg4ei64_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei8.c index c24467f36e72c6..fcdd26bff18f88 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16mf4x4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg4ei8_v_f16mf4x4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16mf2x4(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg4ei8_v_f16mf2x4(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16m1x4(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg4ei8_v_f16m1x4(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16m2x4(_Float16 *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg4ei8_v_f16m2x4(_Float16 *base, vuint8m1_t bindex, vfloat16m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32mf2x4(float *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg4ei8_v_f32mf2x4(float *base, vuint8mf8_t bindex, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32m1x4(float *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg4ei8_v_f32m1x4(float *base, vuint8mf4_t bindex, vfloat32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32m2x4(float *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg4ei8_v_f32m2x4(float *base, vuint8mf2_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f64m1x4(double *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg4ei8_v_f64m1x4(double *base, vuint8mf8_t bindex, vfloat64m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f64m2x4(double *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg4ei8_v_f64m2x4(double *base, vuint8mf4_t bindex, vfloat64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf8x4(int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg4ei8_v_i8mf8x4(int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf4x4(int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg4ei8_v_i8mf4x4(int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf2x4(int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg4ei8_v_i8mf2x4(int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8m1x4(int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg4ei8_v_i8m1x4(int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8m2x4(int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg4ei8_v_i8m2x4(int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16mf4x4(int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg4ei8_v_i16mf4x4(int16_t *base, vuint8mf8_t bindex, vint16mf4x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16mf2x4(int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg4ei8_v_i16mf2x4(int16_t *base, vuint8mf4_t bindex, vint16mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16m1x4(int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg4ei8_v_i16m1x4(int16_t *base, vuint8mf2_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16m2x4(int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg4ei8_v_i16m2x4(int16_t *base, vuint8m1_t bindex, vint16m2x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i32mf2x4(int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg4ei8_v_i32mf2x4(int32_t *base, vuint8mf8_t bindex, vint32mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i32m1x4(int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg4ei8_v_i32m1x4(int32_t *base, vuint8mf4_t bindex, vint32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i32m2x4(int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg4ei8_v_i32m2x4(int32_t *base, vuint8mf2_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i64m1x4(int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg4ei8_v_i64m1x4(int64_t *base, vuint8mf8_t bindex, vint64m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i64m2x4(int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg4ei8_v_i64m2x4(int64_t *base, vuint8mf4_t bindex, vint64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8mf8x4(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg4ei8_v_u8mf8x4(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8mf4x4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg4ei8_v_u8mf4x4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8mf2x4(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t 
v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg4ei8_v_u8mf2x4(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8m1x4(uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg4ei8_v_u8m1x4(uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8m2x4(uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg4ei8_v_u8m2x4(uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16mf4x4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg4ei8_v_u16mf4x4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16mf2x4(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg4ei8_v_u16mf2x4(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16m1x4(uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg4ei8_v_u16m1x4(uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16m2x4(uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg4ei8_v_u16m2x4(uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u32mf2x4(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg4ei8_v_u32mf2x4(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u32m1x4(uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg4ei8_v_u32m1x4(uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u32m2x4(uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg4ei8_v_u32m2x4(uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u64m1x4(uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg4ei8_v_u64m1x4(uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u64m2x4(uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg4ei8_v_u64m2x4(uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg4ei8_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg4ei8_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg4ei8_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg4ei8_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32mf2x4_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void 
test_vsuxseg4ei8_v_f32mf2x4_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32m1x4_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg4ei8_v_f32m1x4_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32m2x4_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg4ei8_v_f32m2x4_m(vbool16_t mask, float *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f64m1x4_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg4ei8_v_f64m1x4_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], 
i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f64m2x4_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg4ei8_v_f64m2x4_m(vbool32_t mask, double *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg4ei8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg4ei8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg4ei8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg4ei8_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg4ei8_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg4ei8_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg4ei8_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg4ei8_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg4ei8_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg4ei8_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg4ei8_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg4ei8_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg4ei8_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg4ei8_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void 
test_vsuxseg4ei8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg4ei8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg4ei8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg4ei8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 
3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg4ei8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg4ei8_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg4ei8_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg4ei8_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg4ei8_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg4ei8_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg4ei8_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg4ei8_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6)
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxseg4ei8_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
@@ -740,7 +740,7 @@ void test_vsuxseg4ei8_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bi
 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m2x4_m
 // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6)
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxseg4ei8_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei16.c
index e082c795ead05d..b63fae90a4978a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei16.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf4x5
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxseg5ei16_v_f16mf4x5(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) {
@@ -20,7 +20,7 @@ void test_vsuxseg5ei16_v_f16mf4x5(_Float16 *base, vuint16mf4_t bindex, vfloat16m
 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf2x5
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxseg5ei16_v_f16mf2x5(_Float16 *base, vuint16mf2_t bindex,
vfloat16mf2x5_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg5ei16_v_f16mf2x5(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f16m1x5(_Float16 *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg5ei16_v_f16m1x5(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f32mf2x5(float *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg5ei16_v_f32mf2x5(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f32m1x5(float *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg5ei16_v_f32m1x5(float *base, vuint16mf2_t bindex, vfloat32m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f64m1x5(double *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg5ei16_v_f64m1x5(double *base, vuint16mf4_t 
bindex, vfloat64m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf8x5(int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg5ei16_v_i8mf8x5(int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf4x5(int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg5ei16_v_i8mf4x5(int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf2x5(int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg5ei16_v_i8mf2x5(int8_t *base, vuint16m1_t bindex, vint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8m1x5(int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg5ei16_v_i8m1x5(int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16mf4x5(int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg5ei16_v_i16mf4x5(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16mf2x5(int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg5ei16_v_i16mf2x5(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16m1x5(int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg5ei16_v_i16m1x5(int16_t *base, vuint16m1_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i32mf2x5(int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg5ei16_v_i32mf2x5(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i32m1x5(int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg5ei16_v_i32m1x5(int32_t *base, vuint16mf2_t bindex, vint32m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i64m1x5(int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg5ei16_v_i64m1x5(int64_t *base, vuint16mf4_t bindex, vint64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf8x5(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg5ei16_v_u8mf8x5(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf4x5(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg5ei16_v_u8mf4x5(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf2x5(uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg5ei16_v_u8mf2x5(uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8m1x5(uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg5ei16_v_u8m1x5(uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16mf4x5(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg5ei16_v_u16mf4x5(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16mf2x5(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg5ei16_v_u16mf2x5(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16m1x5(uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg5ei16_v_u16m1x5(uint16_t *base, vuint16m1_t bindex, vuint16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u32mf2x5(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg5ei16_v_u32mf2x5(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u32m1x5(uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg5ei16_v_u32m1x5(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u64m1x5(uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg5ei16_v_u64m1x5(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg5ei16_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg5ei16_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg5ei16_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f32mf2x5_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg5ei16_v_f32mf2x5_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f32m1x5_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg5ei16_v_f32m1x5_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f64m1x5_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg5ei16_v_f64m1x5_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg5ei16_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg5ei16_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf2x5_m 
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg5ei16_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg5ei16_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg5ei16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t 
v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg5ei16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg5ei16_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg5ei16_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg5ei16_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg5ei16_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg5ei16_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg5ei16_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg5ei16_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg5ei16_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg5ei16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg5ei16_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg5ei16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) 
[[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg5ei16_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg5ei16_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei32.c index 5b49712bdd26c7..18bffadd41ff7f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16mf4x5(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg5ei32_v_f16mf4x5(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16mf2x5(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg5ei32_v_f16mf2x5(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16m1x5(_Float16 *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg5ei32_v_f16m1x5(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f32mf2x5(float *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg5ei32_v_f32mf2x5(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f32m1x5(float *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg5ei32_v_f32m1x5(float *base, vuint32m1_t bindex, vfloat32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f64m1x5(double *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg5ei32_v_f64m1x5(double *base, vuint32mf2_t bindex, vfloat64m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf8x5(int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg5ei32_v_i8mf8x5(int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf4x5(int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg5ei32_v_i8mf4x5(int8_t *base, vuint32m1_t bindex, vint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf2x5(int8_t *base, 
vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg5ei32_v_i8mf2x5(int8_t *base, vuint32m2_t bindex, vint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8m1x5(int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg5ei32_v_i8m1x5(int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16mf4x5(int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg5ei32_v_i16mf4x5(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16mf2x5(int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg5ei32_v_i16mf2x5(int16_t *base, vuint32m1_t bindex, vint16mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16m1x5(int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg5ei32_v_i16m1x5(int16_t 
*base, vuint32m2_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i32mf2x5(int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg5ei32_v_i32mf2x5(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i32m1x5(int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg5ei32_v_i32m1x5(int32_t *base, vuint32m1_t bindex, vint32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i64m1x5(int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg5ei32_v_i64m1x5(int64_t *base, vuint32mf2_t bindex, vint64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8mf8x5(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg5ei32_v_u8mf8x5(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf4x5 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8mf4x5(uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg5ei32_v_u8mf4x5(uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8mf2x5(uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg5ei32_v_u8mf2x5(uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8m1x5(uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg5ei32_v_u8m1x5(uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u16mf4x5(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg5ei32_v_u16mf4x5(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u16mf2x5(uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg5ei32_v_u16mf2x5(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u16m1x5(uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg5ei32_v_u16m1x5(uint16_t *base, vuint32m2_t bindex, vuint16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u32mf2x5(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg5ei32_v_u32mf2x5(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u32m1x5(uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg5ei32_v_u32m1x5(uint32_t *base, vuint32m1_t bindex, vuint32m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u64m1x5(uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg5ei32_v_u64m1x5(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg5ei32_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg5ei32_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg5ei32_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f32mf2x5_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg5ei32_v_f32mf2x5_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f32m1x5_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg5ei32_v_f32m1x5_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f64m1x5_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg5ei32_v_f64m1x5_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void 
test_vsuxseg5ei32_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg5ei32_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg5ei32_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg5ei32_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg5ei32_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg5ei32_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg5ei32_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg5ei32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg5ei32_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg5ei32_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg5ei32_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg5ei32_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg5ei32_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg5ei32_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg5ei32_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg5ei32_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg5ei32_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg5ei32_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg5ei32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg5ei32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u64m1x5_m(vbool64_t mask, uint64_t 
*base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei64.c index b785f2b3e1ce67..0f10905732bc19 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16mf4x5(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg5ei64_v_f16mf4x5(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16mf2x5(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg5ei64_v_f16mf2x5(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16m1x5(_Float16 *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg5ei64_v_f16m1x5(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], 
i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f32mf2x5(float *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg5ei64_v_f32mf2x5(float *base, vuint64m1_t bindex, vfloat32mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f32m1x5(float *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg5ei64_v_f32m1x5(float *base, vuint64m2_t bindex, vfloat32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f64m1x5(double *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg5ei64_v_f64m1x5(double *base, vuint64m1_t bindex, vfloat64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8mf8x5(int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg5ei64_v_i8mf8x5(int8_t *base, vuint64m1_t bindex, vint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", 
, 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8mf4x5(int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg5ei64_v_i8mf4x5(int8_t *base, vuint64m2_t bindex, vint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8mf2x5(int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg5ei64_v_i8mf2x5(int8_t *base, vuint64m4_t bindex, vint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8m1x5(int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg5ei64_v_i8m1x5(int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i16mf4x5(int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg5ei64_v_i16mf4x5(int16_t *base, vuint64m1_t bindex, vint16mf4x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg5ei64_v_i16mf2x5(int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg5ei64_v_i16mf2x5(int16_t *base, vuint64m2_t bindex, vint16mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i16m1x5(int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg5ei64_v_i16m1x5(int16_t *base, vuint64m4_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i32mf2x5(int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg5ei64_v_i32mf2x5(int32_t *base, vuint64m1_t bindex, vint32mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i32m1x5(int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg5ei64_v_i32m1x5(int32_t *base, vuint64m2_t bindex, vint32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i64m1x5(int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ 
void test_vsuxseg5ei64_v_i64m1x5(int64_t *base, vuint64m1_t bindex, vint64m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf8x5(uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg5ei64_v_u8mf8x5(uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf4x5(uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg5ei64_v_u8mf4x5(uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf2x5(uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg5ei64_v_u8mf2x5(uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8m1x5(uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg5ei64_v_u8m1x5(uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t // CHECK-RV64-LABEL: define dso_local 
void @test_vsuxseg5ei64_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16mf4x5(uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg5ei64_v_u16mf4x5(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16mf2x5(uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg5ei64_v_u16mf2x5(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16m1x5(uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg5ei64_v_u16m1x5(uint16_t *base, vuint64m4_t bindex, vuint16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u32mf2x5(uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg5ei64_v_u32mf2x5(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u32m1x5(uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg5ei64_v_u32m1x5(uint32_t *base, vuint64m2_t bindex, vuint32m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u64m1x5(uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg5ei64_v_u64m1x5(uint64_t *base, vuint64m1_t bindex, vuint64m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg5ei64_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg5ei64_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16m1x5_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg5ei64_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f32mf2x5_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg5ei64_v_f32mf2x5_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f32m1x5_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg5ei64_v_f32m1x5_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f64m1x5_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x5_t 
v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg5ei64_v_f64m1x5_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg5ei64_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg5ei64_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg5ei64_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg5ei64_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg5ei64_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg5ei64_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg5ei64_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg5ei64_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg5ei64_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg5ei64_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg5ei64_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg5ei64_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg5ei64_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg5ei64_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg5ei64_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint64m1_t // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg5ei64_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg5ei64_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg5ei64_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg5ei64_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) {
@@ -520,7 +520,7 @@ void test_vsuxseg5ei64_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint64m2_t b
 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u64m1x5_m
 // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6)
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxseg5ei64_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei8.c
index 3cbe449810cf1c..4a7479673ff650 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei8.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf4x5
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxseg5ei8_v_f16mf4x5(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) {
@@ -20,7 +20,7 @@ void test_vsuxseg5ei8_v_f16mf4x5(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4
 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf2x5
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxseg5ei8_v_f16mf2x5(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) {
@@ -30,7 +30,7 @@ void test_vsuxseg5ei8_v_f16mf2x5(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2
 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16m1x5
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f16m1x5(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg5ei8_v_f16m1x5(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f32mf2x5(float *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg5ei8_v_f32mf2x5(float *base, vuint8mf8_t bindex, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f32m1x5(float *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg5ei8_v_f32m1x5(float *base, vuint8mf4_t bindex, vfloat32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f64m1x5(double *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg5ei8_v_f64m1x5(double *base, vuint8mf8_t bindex, vfloat64m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf8x5(int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg5ei8_v_i8mf8x5(int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf4x5(int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg5ei8_v_i8mf4x5(int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf2x5(int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg5ei8_v_i8mf2x5(int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8m1x5(int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg5ei8_v_i8m1x5(int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16mf4x5(int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg5ei8_v_i16mf4x5(int16_t *base, vuint8mf8_t bindex, vint16mf4x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16mf2x5(int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg5ei8_v_i16mf2x5(int16_t *base, vuint8mf4_t bindex, vint16mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16m1x5(int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg5ei8_v_i16m1x5(int16_t *base, vuint8mf2_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i32mf2x5(int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg5ei8_v_i32mf2x5(int32_t *base, vuint8mf8_t bindex, vint32mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i32m1x5(int32_t 
*base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg5ei8_v_i32m1x5(int32_t *base, vuint8mf4_t bindex, vint32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i64m1x5(int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg5ei8_v_i64m1x5(int64_t *base, vuint8mf8_t bindex, vint64m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf8x5(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg5ei8_v_u8mf8x5(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf4x5(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg5ei8_v_u8mf4x5(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf2x5(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg5ei8_v_u8mf2x5(uint8_t *base, 
vuint8mf2_t bindex, vuint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8m1x5(uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg5ei8_v_u8m1x5(uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u16mf4x5(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg5ei8_v_u16mf4x5(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u16mf2x5(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg5ei8_v_u16mf2x5(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u16m1x5(uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg5ei8_v_u16m1x5(uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32mf2x5 // CHECK-RV64-SAME: 
(ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u32mf2x5(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg5ei8_v_u32mf2x5(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u32m1x5(uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg5ei8_v_u32m1x5(uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u64m1x5(uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg5ei8_v_u64m1x5(uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg5ei8_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg5ei8_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg5ei8_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f32mf2x5_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg5ei8_v_f32mf2x5_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f32m1x5_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void 
test_vsuxseg5ei8_v_f32m1x5_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f64m1x5_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg5ei8_v_f64m1x5_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg5ei8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg5ei8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) 
// CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg5ei8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg5ei8_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg5ei8_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg5ei8_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg5ei8_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg5ei8_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg5ei8_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg5ei8_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg5ei8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg5ei8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg5ei8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg5ei8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg5ei8_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg5ei8_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg5ei8_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ 
void test_vsuxseg5ei8_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b
 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32m1x5_m
 // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5)
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxseg5ei8_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) {
@@ -520,7 +520,7 @@ void test_vsuxseg5ei8_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi
 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u64m1x5_m
 // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6)
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxseg5ei8_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei16.c
index 2bfa6203c199b7..f0928b55c3a1f7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei16.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf4x6
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxseg6ei16_v_f16mf4x6(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) {
@@ -20,7 +20,7 @@ void test_vsuxseg6ei16_v_f16mf4x6(_Float16 *base, vuint16mf4_t bindex, vfloat16m
 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf2x6
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f16mf2x6(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg6ei16_v_f16mf2x6(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f16m1x6(_Float16 *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg6ei16_v_f16m1x6(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f32mf2x6(float *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg6ei16_v_f32mf2x6(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f32m1x6(float *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg6ei16_v_f32m1x6(float *base, vuint16mf2_t bindex, vfloat32m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f64m1x6(double *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg6ei16_v_f64m1x6(double *base, vuint16mf4_t bindex, vfloat64m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf8x6(int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg6ei16_v_i8mf8x6(int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf4x6(int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg6ei16_v_i8mf4x6(int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf2x6(int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg6ei16_v_i8mf2x6(int8_t *base, vuint16m1_t bindex, vint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8m1x6(int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg6ei16_v_i8m1x6(int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16mf4x6(int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg6ei16_v_i16mf4x6(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16mf2x6(int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg6ei16_v_i16mf2x6(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16m1x6(int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg6ei16_v_i16m1x6(int16_t *base, vuint16m1_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i32mf2x6(int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg6ei16_v_i32mf2x6(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i32m1x6(int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg6ei16_v_i32m1x6(int32_t *base, vuint16mf2_t bindex, vint32m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i64m1x6(int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg6ei16_v_i64m1x6(int64_t *base, vuint16mf4_t bindex, vint64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf8x6(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg6ei16_v_u8mf8x6(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf4x6(uint8_t 
*base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg6ei16_v_u8mf4x6(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf2x6(uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg6ei16_v_u8mf2x6(uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8m1x6(uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg6ei16_v_u8m1x6(uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16mf4x6(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg6ei16_v_u16mf4x6(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16mf2x6(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void 
test_vsuxseg6ei16_v_u16mf2x6(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16m1x6(uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg6ei16_v_u16m1x6(uint16_t *base, vuint16m1_t bindex, vuint16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u32mf2x6(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg6ei16_v_u32mf2x6(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u32m1x6(uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg6ei16_v_u32m1x6(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u64m1x6(uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg6ei16_v_u64m1x6(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg6ei16_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg6ei16_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg6ei16_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg6ei16_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg6ei16_v_f32mf2x6_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg6ei16_v_f32mf2x6_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f32m1x6_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg6ei16_v_f32m1x6_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f64m1x6_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg6ei16_v_f64m1x6_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg6ei16_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg6ei16_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg6ei16_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg6ei16_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg6ei16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg6ei16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg6ei16_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg6ei16_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg6ei16_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i64m1x6_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg6ei16_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg6ei16_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg6ei16_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) 
{ @@ -460,7 +460,7 @@ void test_vsuxseg6ei16_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg6ei16_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg6ei16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg6ei16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg6ei16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg6ei16_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg6ei16_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei32.c index fee5c6c59b7acf..63a982958382f5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg6ei32_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16mf4x6(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg6ei32_v_f16mf4x6(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16mf2x6(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg6ei32_v_f16mf2x6(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16m1x6(_Float16 *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg6ei32_v_f16m1x6(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f32mf2x6(float *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg6ei32_v_f32mf2x6(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f32m1x6(float *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg6ei32_v_f32m1x6(float *base, vuint32m1_t bindex, vfloat32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f64m1x6(double *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg6ei32_v_f64m1x6(double *base, vuint32mf2_t bindex, vfloat64m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8mf8x6(int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg6ei32_v_i8mf8x6(int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8mf4x6(int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg6ei32_v_i8mf4x6(int8_t *base, vuint32m1_t bindex, vint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8mf2x6(int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg6ei32_v_i8mf2x6(int8_t *base, vuint32m2_t bindex, vint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8m1x6(int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg6ei32_v_i8m1x6(int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16mf4x6(int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg6ei32_v_i16mf4x6(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16mf2x6(int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg6ei32_v_i16mf2x6(int16_t *base, vuint32m1_t bindex, vint16mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 
6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16m1x6(int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg6ei32_v_i16m1x6(int16_t *base, vuint32m2_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i32mf2x6(int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg6ei32_v_i32mf2x6(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i32m1x6(int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg6ei32_v_i32m1x6(int32_t *base, vuint32m1_t bindex, vint32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i64m1x6(int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg6ei32_v_i64m1x6(int64_t *base, vuint32mf2_t bindex, vint64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf8x6(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg6ei32_v_u8mf8x6(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf4x6(uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg6ei32_v_u8mf4x6(uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf2x6(uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg6ei32_v_u8mf2x6(uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8m1x6(uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg6ei32_v_u8m1x6(uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16mf4x6(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg6ei32_v_u16mf4x6(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16mf2x6(uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg6ei32_v_u16mf2x6(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16m1x6(uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg6ei32_v_u16m1x6(uint16_t *base, vuint32m2_t bindex, vuint16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u32mf2x6(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg6ei32_v_u32mf2x6(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg6ei32_v_u32m1x6(uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg6ei32_v_u32m1x6(uint32_t *base, vuint32m1_t bindex, vuint32m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u64m1x6(uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg6ei32_v_u64m1x6(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg6ei32_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg6ei32_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg6ei32_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f32mf2x6_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg6ei32_v_f32mf2x6_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f32m1x6_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg6ei32_v_f32m1x6_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f64m1x6_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg6ei32_v_f64m1x6_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg6ei32_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg6ei32_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg6ei32_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg6ei32_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg6ei32_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg6ei32_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg6ei32_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ 
-410,7 +410,7 @@ void test_vsuxseg6ei32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg6ei32_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg6ei32_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg6ei32_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg6ei32_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg6ei32_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg6ei32_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg6ei32_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg6ei32_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg6ei32_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg6ei32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg6ei32_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6)
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxseg6ei32_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei64.c
index 0e37256d7cbbdb..968defd7025568 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei64.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf4x6
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxseg6ei64_v_f16mf4x6(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) {
@@ -20,7 +20,7 @@ void test_vsuxseg6ei64_v_f16mf4x6(_Float16 *base, vuint64m1_t bindex, vfloat16mf
 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf2x6
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxseg6ei64_v_f16mf2x6(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) {
@@ -30,7 +30,7 @@ void test_vsuxseg6ei64_v_f16mf2x6(_Float16 *base, vuint64m2_t bindex, vfloat16mf
 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16m1x6
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxseg6ei64_v_f16m1x6(_Float16 *base, vuint64m4_t bindex, vfloat16m1x6_t
v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg6ei64_v_f16m1x6(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f32mf2x6(float *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg6ei64_v_f32mf2x6(float *base, vuint64m1_t bindex, vfloat32mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f32m1x6(float *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg6ei64_v_f32m1x6(float *base, vuint64m2_t bindex, vfloat32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f64m1x6(double *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg6ei64_v_f64m1x6(double *base, vuint64m1_t bindex, vfloat64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf8x6(int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg6ei64_v_i8mf8x6(int8_t *base, vuint64m1_t bindex, vint8mf8x6_t // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf4x6(int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg6ei64_v_i8mf4x6(int8_t *base, vuint64m2_t bindex, vint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf2x6(int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg6ei64_v_i8mf2x6(int8_t *base, vuint64m4_t bindex, vint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8m1x6(int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg6ei64_v_i8m1x6(int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16mf4x6(int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg6ei64_v_i16mf4x6(int16_t *base, vuint64m1_t bindex, vint16mf4x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16mf2x6(int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg6ei64_v_i16mf2x6(int16_t *base, vuint64m2_t bindex, vint16mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16m1x6(int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg6ei64_v_i16m1x6(int16_t *base, vuint64m4_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i32mf2x6(int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg6ei64_v_i32mf2x6(int32_t *base, vuint64m1_t bindex, vint32mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i32m1x6(int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg6ei64_v_i32m1x6(int32_t *base, vuint64m2_t bindex, vint32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i64m1x6(int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg6ei64_v_i64m1x6(int64_t *base, vuint64m1_t bindex, vint64m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf8x6(uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg6ei64_v_u8mf8x6(uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf4x6(uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg6ei64_v_u8mf4x6(uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf2x6(uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg6ei64_v_u8mf2x6(uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8m1x6(uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg6ei64_v_u8m1x6(uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16mf4x6(uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg6ei64_v_u16mf4x6(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16mf2x6(uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg6ei64_v_u16mf2x6(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16m1x6(uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg6ei64_v_u16m1x6(uint16_t *base, vuint64m4_t bindex, vuint16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u32mf2x6(uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg6ei64_v_u32mf2x6(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u32m1x6(uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg6ei64_v_u32m1x6(uint32_t *base, vuint64m2_t bindex, vuint32m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u64m1x6(uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg6ei64_v_u64m1x6(uint64_t *base, vuint64m1_t bindex, vuint64m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg6ei64_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg6ei64_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg6ei64_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f32mf2x6_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg6ei64_v_f32mf2x6_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f32m1x6_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg6ei64_v_f32m1x6_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f64m1x6_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg6ei64_v_f64m1x6_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg6ei64_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg6ei64_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg6ei64_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: 
define dso_local void @test_vsuxseg6ei64_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg6ei64_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg6ei64_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg6ei64_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16m1x6_m(vbool16_t 
mask, int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg6ei64_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg6ei64_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg6ei64_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg6ei64_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg6ei64_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg6ei64_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg6ei64_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg6ei64_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg6ei64_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg6ei64_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg6ei64_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg6ei64_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32m1x6_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg6ei64_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei8.c index 30080d103f1b36..895f5c8882eb53 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16mf4x6(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg6ei8_v_f16mf4x6(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16mf2x6(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg6ei8_v_f16mf2x6(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16m1x6(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg6ei8_v_f16m1x6(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f32mf2x6(float *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg6ei8_v_f32mf2x6(float *base, vuint8mf8_t bindex, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f32m1x6(float *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg6ei8_v_f32m1x6(float *base, vuint8mf4_t bindex, vfloat32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f64m1x6(double *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg6ei8_v_f64m1x6(double *base, vuint8mf8_t bindex, vfloat64m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8mf8x6(int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg6ei8_v_i8mf8x6(int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8mf4x6(int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg6ei8_v_i8mf4x6(int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8mf2x6(int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg6ei8_v_i8mf2x6(int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8m1x6(int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) 
{ @@ -110,7 +110,7 @@ void test_vsuxseg6ei8_v_i8m1x6(int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i16mf4x6(int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg6ei8_v_i16mf4x6(int16_t *base, vuint8mf8_t bindex, vint16mf4x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i16mf2x6(int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg6ei8_v_i16mf2x6(int16_t *base, vuint8mf4_t bindex, vint16mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i16m1x6(int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg6ei8_v_i16m1x6(int16_t *base, vuint8mf2_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i32mf2x6(int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg6ei8_v_i32mf2x6(int32_t *base, vuint8mf8_t bindex, vint32mf2x6_ // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg6ei8_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i32m1x6(int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg6ei8_v_i32m1x6(int32_t *base, vuint8mf4_t bindex, vint32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i64m1x6(int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg6ei8_v_i64m1x6(int64_t *base, vuint8mf8_t bindex, vint64m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf8x6(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg6ei8_v_u8mf8x6(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf4x6(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg6ei8_v_u8mf4x6(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 
6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf2x6(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg6ei8_v_u8mf2x6(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8m1x6(uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg6ei8_v_u8m1x6(uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16mf4x6(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg6ei8_v_u16mf4x6(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16mf2x6(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg6ei8_v_u16mf2x6(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16m1x6(uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg6ei8_v_u16m1x6(uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u32mf2x6(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg6ei8_v_u32mf2x6(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u32m1x6(uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg6ei8_v_u32m1x6(uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u64m1x6(uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg6ei8_v_u64m1x6(uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg6ei8_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg6ei8_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg6ei8_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f32mf2x6_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg6ei8_v_f32mf2x6_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f32m1x6_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg6ei8_v_f32m1x6_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f64m1x6_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg6ei8_v_f64m1x6_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg6ei8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg6ei8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg6ei8_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg6ei8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg6ei8_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg6ei8_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i16mf2x6_m(vbool32_t mask, int16_t *base, 
vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg6ei8_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg6ei8_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg6ei8_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg6ei8_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg6ei8_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg6ei8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg6ei8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg6ei8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg6ei8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg6ei8_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg6ei8_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg6ei8_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg6ei8_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg6ei8_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei16.c index 5e034126e3890e..6717a6ab568d5f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f16mf4x7(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg7ei16_v_f16mf4x7(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f16mf2x7(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg7ei16_v_f16mf2x7(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f16m1x7(_Float16 *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg7ei16_v_f16m1x7(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f32mf2x7(float *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg7ei16_v_f32mf2x7(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f32m1x7(float *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg7ei16_v_f32m1x7(float *base, vuint16mf2_t bindex, vfloat32m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f64m1x7(double *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg7ei16_v_f64m1x7(double *base, vuint16mf4_t bindex, vfloat64m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf8x7(int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg7ei16_v_i8mf8x7(int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf4x7(int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg7ei16_v_i8mf4x7(int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf2x7(int8_t *base, 
vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg7ei16_v_i8mf2x7(int8_t *base, vuint16m1_t bindex, vint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8m1x7(int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg7ei16_v_i8m1x7(int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i16mf4x7(int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg7ei16_v_i16mf4x7(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i16mf2x7(int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg7ei16_v_i16mf2x7(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i16m1x7(int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg7ei16_v_i16m1x7(int16_t 
*base, vuint16m1_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i32mf2x7(int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg7ei16_v_i32mf2x7(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i32m1x7(int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg7ei16_v_i32m1x7(int32_t *base, vuint16mf2_t bindex, vint32m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i64m1x7(int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg7ei16_v_i64m1x7(int64_t *base, vuint16mf4_t bindex, vint64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8mf8x7(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg7ei16_v_u8mf8x7(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf4x7 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8mf4x7(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg7ei16_v_u8mf4x7(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8mf2x7(uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg7ei16_v_u8mf2x7(uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8m1x7(uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg7ei16_v_u8m1x7(uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16mf4x7(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg7ei16_v_u16mf4x7(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16mf2x7(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg7ei16_v_u16mf2x7(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16m1x7(uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg7ei16_v_u16m1x7(uint16_t *base, vuint16m1_t bindex, vuint16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u32mf2x7(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg7ei16_v_u32mf2x7(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u32m1x7(uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg7ei16_v_u32m1x7(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u64m1x7(uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg7ei16_v_u64m1x7(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg7ei16_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg7ei16_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg7ei16_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f32mf2x7_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg7ei16_v_f32mf2x7_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f32m1x7_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg7ei16_v_f32m1x7_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f64m1x7_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg7ei16_v_f64m1x7_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void 
test_vsuxseg7ei16_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg7ei16_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg7ei16_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg7ei16_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg7ei16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg7ei16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg7ei16_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg7ei16_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg7ei16_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg7ei16_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg7ei16_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg7ei16_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg7ei16_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg7ei16_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg7ei16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg7ei16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg7ei16_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg7ei16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg7ei16_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg7ei16_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u64m1x7_m(vbool64_t mask, uint64_t 
*base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei32.c index a260ee0abfa14d..d4f9a324d55c26 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16mf4x7(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg7ei32_v_f16mf4x7(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16mf2x7(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg7ei32_v_f16mf2x7(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16m1x7(_Float16 *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg7ei32_v_f16m1x7(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], 
i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f32mf2x7(float *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg7ei32_v_f32mf2x7(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f32m1x7(float *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg7ei32_v_f32m1x7(float *base, vuint32m1_t bindex, vfloat32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f64m1x7(double *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg7ei32_v_f64m1x7(double *base, vuint32mf2_t bindex, vfloat64m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8mf8x7(int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg7ei32_v_i8mf8x7(int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8mf4x7(int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg7ei32_v_i8mf4x7(int8_t *base, vuint32m1_t bindex, vint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8mf2x7(int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg7ei32_v_i8mf2x7(int8_t *base, vuint32m2_t bindex, vint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8m1x7(int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg7ei32_v_i8m1x7(int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16mf4x7(int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg7ei32_v_i16mf4x7(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16mf2x7(int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg7ei32_v_i16mf2x7(int16_t *base, vuint32m1_t bindex, vint16mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16m1x7(int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg7ei32_v_i16m1x7(int16_t *base, vuint32m2_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i32mf2x7(int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg7ei32_v_i32mf2x7(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i32m1x7(int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg7ei32_v_i32m1x7(int32_t *base, vuint32m1_t bindex, vint32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i64m1x7(int64_t *base, 
vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg7ei32_v_i64m1x7(int64_t *base, vuint32mf2_t bindex, vint64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf8x7(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg7ei32_v_u8mf8x7(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf4x7(uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg7ei32_v_u8mf4x7(uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf2x7(uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg7ei32_v_u8mf2x7(uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8m1x7(uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg7ei32_v_u8m1x7(uint8_t 
*base, vuint32m4_t bindex, vuint8m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16mf4x7(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg7ei32_v_u16mf4x7(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16mf2x7(uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg7ei32_v_u16mf2x7(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16m1x7(uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg7ei32_v_u16m1x7(uint16_t *base, vuint32m2_t bindex, vuint16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u32mf2x7(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg7ei32_v_u32mf2x7(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg7ei32_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u32m1x7(uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg7ei32_v_u32m1x7(uint32_t *base, vuint32m1_t bindex, vuint32m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u64m1x7(uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg7ei32_v_u64m1x7(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg7ei32_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg7ei32_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, 
vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg7ei32_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f32mf2x7_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg7ei32_v_f32mf2x7_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f32m1x7_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg7ei32_v_f32m1x7_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg7ei32_v_f64m1x7_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg7ei32_v_f64m1x7_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg7ei32_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg7ei32_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg7ei32_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg7ei32_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg7ei32_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg7ei32_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg7ei32_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg7ei32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg7ei32_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg7ei32_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg7ei32_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf4x7_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg7ei32_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg7ei32_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg7ei32_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) 
{ @@ -480,7 +480,7 @@ void test_vsuxseg7ei32_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg7ei32_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg7ei32_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg7ei32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg7ei32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei64.c index 02496947a0a3d2..431459dc548fe6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16mf4x7(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg7ei64_v_f16mf4x7(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16mf2x7(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg7ei64_v_f16mf2x7(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16m1x7(_Float16 *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg7ei64_v_f16m1x7(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f32mf2x7(float *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg7ei64_v_f32mf2x7(float *base, vuint64m1_t bindex, vfloat32mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f32m1x7(float *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg7ei64_v_f32m1x7(float *base, vuint64m2_t bindex, vfloat32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f64m1x7(double *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg7ei64_v_f64m1x7(double *base, vuint64m1_t bindex, vfloat64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf8x7(int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg7ei64_v_i8mf8x7(int8_t *base, vuint64m1_t bindex, vint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf4x7(int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg7ei64_v_i8mf4x7(int8_t *base, vuint64m2_t bindex, vint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf2x7(int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg7ei64_v_i8mf2x7(int8_t *base, vuint64m4_t bindex, vint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8m1x7(int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg7ei64_v_i8m1x7(int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i16mf4x7(int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg7ei64_v_i16mf4x7(int16_t *base, vuint64m1_t bindex, vint16mf4x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i16mf2x7(int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg7ei64_v_i16mf2x7(int16_t *base, vuint64m2_t bindex, vint16mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i16m1x7(int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg7ei64_v_i16m1x7(int16_t *base, vuint64m4_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i32mf2x7(int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg7ei64_v_i32mf2x7(int32_t *base, vuint64m1_t bindex, vint32mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i32m1x7(int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg7ei64_v_i32m1x7(int32_t *base, vuint64m2_t bindex, vint32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i64m1x7(int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg7ei64_v_i64m1x7(int64_t *base, vuint64m1_t bindex, vint64m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf8x7(uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg7ei64_v_u8mf8x7(uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf4x7(uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg7ei64_v_u8mf4x7(uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf2x7(uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg7ei64_v_u8mf2x7(uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8m1x7(uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg7ei64_v_u8m1x7(uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16mf4x7(uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg7ei64_v_u16mf4x7(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16mf2x7(uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg7ei64_v_u16mf2x7(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16m1x7(uint16_t 
*base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg7ei64_v_u16m1x7(uint16_t *base, vuint64m4_t bindex, vuint16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u32mf2x7(uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg7ei64_v_u32mf2x7(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u32m1x7(uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg7ei64_v_u32m1x7(uint32_t *base, vuint64m2_t bindex, vuint32m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u64m1x7(uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg7ei64_v_u64m1x7(uint64_t *base, vuint64m1_t bindex, vuint64m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, 
vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg7ei64_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg7ei64_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg7ei64_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f32mf2x7_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg7ei64_v_f32mf2x7_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f32m1x7_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg7ei64_v_f32m1x7_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f64m1x7_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg7ei64_v_f64m1x7_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg7ei64_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg7ei64_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg7ei64_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg7ei64_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg7ei64_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg7ei64_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16m1x7_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg7ei64_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg7ei64_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg7ei64_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) { 
@@ -430,7 +430,7 @@ void test_vsuxseg7ei64_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg7ei64_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg7ei64_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg7ei64_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg7ei64_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg7ei64_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg7ei64_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg7ei64_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg7ei64_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg7ei64_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei8.c index 91dbe997e86fd9..7570129544bb06 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16mf4x7(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void 
test_vsuxseg7ei8_v_f16mf4x7(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16mf2x7(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg7ei8_v_f16mf2x7(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16m1x7(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg7ei8_v_f16m1x7(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f32mf2x7(float *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg7ei8_v_f32mf2x7(float *base, vuint8mf8_t bindex, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f32m1x7(float *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg7ei8_v_f32m1x7(float *base, vuint8mf4_t bindex, vfloat32m1x7_t // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg7ei8_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f64m1x7(double *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg7ei8_v_f64m1x7(double *base, vuint8mf8_t bindex, vfloat64m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf8x7(int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg7ei8_v_i8mf8x7(int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf4x7(int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg7ei8_v_i8mf4x7(int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf2x7(int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg7ei8_v_i8mf2x7(int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8m1x7(int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg7ei8_v_i8m1x7(int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16mf4x7(int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg7ei8_v_i16mf4x7(int16_t *base, vuint8mf8_t bindex, vint16mf4x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16mf2x7(int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg7ei8_v_i16mf2x7(int16_t *base, vuint8mf4_t bindex, vint16mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16m1x7(int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg7ei8_v_i16m1x7(int16_t *base, vuint8mf2_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i32mf2x7(int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg7ei8_v_i32mf2x7(int32_t *base, vuint8mf8_t bindex, vint32mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i32m1x7(int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg7ei8_v_i32m1x7(int32_t *base, vuint8mf4_t bindex, vint32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i64m1x7(int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg7ei8_v_i64m1x7(int64_t *base, vuint8mf8_t bindex, vint64m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8mf8x7(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg7ei8_v_u8mf8x7(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8mf4x7(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg7ei8_v_u8mf4x7(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8mf2x7(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg7ei8_v_u8mf2x7(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8m1x7(uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg7ei8_v_u8m1x7(uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16mf4x7(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg7ei8_v_u16mf4x7(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16mf2x7(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg7ei8_v_u16mf2x7(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16m1x7(uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg7ei8_v_u16m1x7(uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u32mf2x7(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg7ei8_v_u32mf2x7(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u32m1x7(uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg7ei8_v_u32m1x7(uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u64m1x7(uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg7ei8_v_u64m1x7(uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg7ei8_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg7ei8_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg7ei8_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 
0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f32mf2x7_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg7ei8_v_f32mf2x7_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f32m1x7_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg7ei8_v_f32m1x7_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f64m1x7_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg7ei8_v_f64m1x7_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg7ei8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg7ei8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg7ei8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg7ei8_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg7ei8_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg7ei8_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg7ei8_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg7ei8_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void 
test_vsuxseg7ei8_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg7ei8_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg7ei8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg7ei8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 
3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg7ei8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg7ei8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg7ei8_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg7ei8_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg7ei8_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg7ei8_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg7ei8_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei16.c index e84b9308f6f15d..06997ebfb67aae 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei16.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16mf4x8(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg8ei16_v_f16mf4x8(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16mf2x8(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg8ei16_v_f16mf2x8(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16m1x8(_Float16 *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg8ei16_v_f16m1x8(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f32mf2x8(float *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg8ei16_v_f32mf2x8(float *base, vuint16mf4_t bindex, 
vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f32m1x8(float *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg8ei16_v_f32m1x8(float *base, vuint16mf2_t bindex, vfloat32m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f64m1x8(double *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg8ei16_v_f64m1x8(double *base, vuint16mf4_t bindex, vfloat64m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf8x8(int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg8ei16_v_i8mf8x8(int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf4x8(int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg8ei16_v_i8mf4x8(int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf2x8(int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg8ei16_v_i8mf2x8(int8_t *base, vuint16m1_t bindex, vint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8m1x8(int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg8ei16_v_i8m1x8(int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i16mf4x8(int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg8ei16_v_i16mf4x8(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i16mf2x8(int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg8ei16_v_i16mf2x8(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i16m1x8(int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg8ei16_v_i16m1x8(int16_t *base, vuint16m1_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i32mf2x8(int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg8ei16_v_i32mf2x8(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i32m1x8(int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg8ei16_v_i32m1x8(int32_t *base, vuint16mf2_t bindex, vint32m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i64m1x8(int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg8ei16_v_i64m1x8(int64_t *base, vuint16mf4_t bindex, vint64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf8x8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg8ei16_v_u8mf8x8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf4x8(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg8ei16_v_u8mf4x8(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf2x8(uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg8ei16_v_u8mf2x8(uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8m1x8(uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg8ei16_v_u8m1x8(uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16mf4x8(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg8ei16_v_u16mf4x8(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16mf2x8(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg8ei16_v_u16mf2x8(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16m1x8(uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg8ei16_v_u16m1x8(uint16_t *base, vuint16m1_t bindex, vuint16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u32mf2x8(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg8ei16_v_u32mf2x8(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u32m1x8(uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg8ei16_v_u32m1x8(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u64m1x8(uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg8ei16_v_u64m1x8(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg8ei16_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg8ei16_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg8ei16_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f32mf2x8_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg8ei16_v_f32mf2x8_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f32m1x8_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg8ei16_v_f32m1x8_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f64m1x8_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg8ei16_v_f64m1x8_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg8ei16_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg8ei16_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg8ei16_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg8ei16_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg8ei16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg8ei16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg8ei16_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg8ei16_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg8ei16_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg8ei16_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg8ei16_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg8ei16_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg8ei16_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg8ei16_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg8ei16_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg8ei16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg8ei16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg8ei16_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg8ei16_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg8ei16_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u64m1x8_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei32.c index 02df0ac48ed0db..40fe2b93b639a5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16mf4x8(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg8ei32_v_f16mf4x8(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16mf2x8(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg8ei32_v_f16mf2x8(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16m1x8(_Float16 *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg8ei32_v_f16m1x8(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f32mf2x8(float *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg8ei32_v_f32mf2x8(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f32m1x8(float *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg8ei32_v_f32m1x8(float *base, vuint32m1_t bindex, vfloat32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f64m1x8(double *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg8ei32_v_f64m1x8(double *base, vuint32mf2_t bindex, vfloat64m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf8x8(int8_t *base, 
vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg8ei32_v_i8mf8x8(int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf4x8(int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg8ei32_v_i8mf4x8(int8_t *base, vuint32m1_t bindex, vint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf2x8(int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg8ei32_v_i8mf2x8(int8_t *base, vuint32m2_t bindex, vint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8m1x8(int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg8ei32_v_i8m1x8(int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16mf4x8(int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg8ei32_v_i16mf4x8(int16_t *base, 
vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16mf2x8(int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg8ei32_v_i16mf2x8(int16_t *base, vuint32m1_t bindex, vint16mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16m1x8(int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg8ei32_v_i16m1x8(int16_t *base, vuint32m2_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i32mf2x8(int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg8ei32_v_i32mf2x8(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i32m1x8(int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg8ei32_v_i32m1x8(int32_t *base, vuint32m1_t bindex, vint32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i64m1x8 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i64m1x8(int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg8ei32_v_i64m1x8(int64_t *base, vuint32mf2_t bindex, vint64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf8x8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg8ei32_v_u8mf8x8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf4x8(uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg8ei32_v_u8mf4x8(uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf2x8(uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg8ei32_v_u8mf2x8(uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8m1x8(uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg8ei32_v_u8m1x8(uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u16mf4x8(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg8ei32_v_u16mf4x8(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u16mf2x8(uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg8ei32_v_u16mf2x8(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u16m1x8(uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg8ei32_v_u16m1x8(uint16_t *base, vuint32m2_t bindex, vuint16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u32mf2x8(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg8ei32_v_u32mf2x8(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u32m1x8(uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg8ei32_v_u32m1x8(uint32_t *base, vuint32m1_t bindex, vuint32m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u64m1x8(uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg8ei32_v_u64m1x8(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg8ei32_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg8ei32_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg8ei32_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f32mf2x8_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg8ei32_v_f32mf2x8_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f32m1x8_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg8ei32_v_f32m1x8_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f64m1x8_m // CHECK-RV64-SAME: 
( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f64m1x8_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg8ei32_v_f64m1x8_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg8ei32_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg8ei32_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { 
@@ -360,7 +360,7 @@ void test_vsuxseg8ei32_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg8ei32_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg8ei32_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg8ei32_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg8ei32_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg8ei32_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg8ei32_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg8ei32_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg8ei32_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg8ei32_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg8ei32_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg8ei32_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg8ei32_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg8ei32_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg8ei32_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg8ei32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg8ei32_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg8ei32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei64.c index 144f31d8797bf2..4d3deaa462c40a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f16mf4x8(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg8ei64_v_f16mf4x8(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f16mf2x8(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg8ei64_v_f16mf2x8(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f16m1x8(_Float16 *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg8ei64_v_f16m1x8(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f32mf2x8(float *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg8ei64_v_f32mf2x8(float *base, vuint64m1_t bindex, vfloat32mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f32m1x8(float *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg8ei64_v_f32m1x8(float *base, vuint64m2_t bindex, vfloat32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f64m1x8(double *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg8ei64_v_f64m1x8(double *base, vuint64m1_t bindex, vfloat64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf8x8(int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg8ei64_v_i8mf8x8(int8_t *base, vuint64m1_t bindex, vint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf4x8(int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg8ei64_v_i8mf4x8(int8_t *base, vuint64m2_t bindex, vint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf2x8(int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg8ei64_v_i8mf2x8(int8_t *base, vuint64m4_t bindex, vint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8m1x8(int8_t *base, vuint64m8_t 
bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg8ei64_v_i8m1x8(int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16mf4x8(int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg8ei64_v_i16mf4x8(int16_t *base, vuint64m1_t bindex, vint16mf4x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16mf2x8(int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg8ei64_v_i16mf2x8(int16_t *base, vuint64m2_t bindex, vint16mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16m1x8(int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg8ei64_v_i16m1x8(int16_t *base, vuint64m4_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i32mf2x8(int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg8ei64_v_i32mf2x8(int32_t *base, 
vuint64m1_t bindex, vint32mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i32m1x8(int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg8ei64_v_i32m1x8(int32_t *base, vuint64m2_t bindex, vint32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i64m1x8(int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg8ei64_v_i64m1x8(int64_t *base, vuint64m1_t bindex, vint64m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8mf8x8(uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg8ei64_v_u8mf8x8(uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8mf4x8(uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg8ei64_v_u8mf4x8(uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf2x8 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8mf2x8(uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg8ei64_v_u8mf2x8(uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8m1x8(uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg8ei64_v_u8m1x8(uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16mf4x8(uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg8ei64_v_u16mf4x8(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16mf2x8(uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg8ei64_v_u16mf2x8(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16m1x8(uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg8ei64_v_u16m1x8(uint16_t *base, vuint64m4_t bindex, vuint16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u32mf2x8(uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg8ei64_v_u32mf2x8(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u32m1x8(uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg8ei64_v_u32m1x8(uint32_t *base, vuint64m2_t bindex, vuint32m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u64m1x8(uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg8ei64_v_u64m1x8(uint64_t *base, vuint64m1_t bindex, vuint64m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg8ei64_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg8ei64_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg8ei64_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f32mf2x8_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg8ei64_v_f32mf2x8_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32m1x8_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f32m1x8_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg8ei64_v_f32m1x8_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f64m1x8_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg8ei64_v_f64m1x8_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg8ei64_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x8_t 
v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg8ei64_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg8ei64_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg8ei64_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg8ei64_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg8ei64_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg8ei64_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg8ei64_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg8ei64_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg8ei64_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg8ei64_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg8ei64_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg8ei64_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg8ei64_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg8ei64_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg8ei64_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg8ei64_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg8ei64_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg8ei64_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei8.c index f84c43147b4153..ffe23453e0e381 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f16mf4x8(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg8ei8_v_f16mf4x8(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f16mf2x8(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg8ei8_v_f16mf2x8(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f16m1x8(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg8ei8_v_f16m1x8(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f32mf2x8(float *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg8ei8_v_f32mf2x8(float *base, vuint8mf8_t bindex, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f32m1x8(float *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg8ei8_v_f32m1x8(float *base, vuint8mf4_t bindex, vfloat32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f64m1x8(double *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg8ei8_v_f64m1x8(double *base, vuint8mf8_t bindex, vfloat64m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8mf8x8(int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg8ei8_v_i8mf8x8(int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8mf4x8(int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg8ei8_v_i8mf4x8(int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8mf2x8(int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg8ei8_v_i8mf2x8(int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8m1x8(int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg8ei8_v_i8m1x8(int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16mf4x8(int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg8ei8_v_i16mf4x8(int16_t *base, vuint8mf8_t bindex, vint16mf4x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16mf2x8(int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg8ei8_v_i16mf2x8(int16_t *base, vuint8mf4_t bindex, vint16mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16m1x8(int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg8ei8_v_i16m1x8(int16_t *base, vuint8mf2_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i32mf2x8(int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg8ei8_v_i32mf2x8(int32_t *base, vuint8mf8_t bindex, vint32mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i32m1x8(int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg8ei8_v_i32m1x8(int32_t *base, vuint8mf4_t bindex, vint32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i64m1x8(int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg8ei8_v_i64m1x8(int64_t *base, vuint8mf8_t bindex, vint64m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf8x8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, 
size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg8ei8_v_u8mf8x8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf4x8(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg8ei8_v_u8mf4x8(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf2x8(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg8ei8_v_u8mf2x8(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8m1x8(uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg8ei8_v_u8m1x8(uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16mf4x8(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg8ei8_v_u16mf4x8(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: 
define dso_local void @test_vsuxseg8ei8_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16mf2x8(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg8ei8_v_u16mf2x8(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16m1x8(uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg8ei8_v_u16m1x8(uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u32mf2x8(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg8ei8_v_u32mf2x8(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u32m1x8(uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg8ei8_v_u32m1x8(uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u64m1x8(uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg8ei8_v_u64m1x8(uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg8ei8_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg8ei8_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg8ei8_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f32mf2x8_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg8ei8_v_f32mf2x8_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f32m1x8_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg8ei8_v_f32m1x8_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f64m1x8_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg8ei8_v_f64m1x8_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg8ei8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg8ei8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg8ei8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg8ei8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg8ei8_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg8ei8_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg8ei8_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg8ei8_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg8ei8_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg8ei8_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg8ei8_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg8ei8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg8ei8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg8ei8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg8ei8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg8ei8_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void 
test_vsuxseg8ei8_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg8ei8_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg8ei8_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg8ei8_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg2ei16.c index b8072943b4cc7e..cbeabc47ea9ccb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg2ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m1x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2(const __bf16 *rs1, vuint16m2_t rs2, @@ -55,7 +55,7 @@ vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2(const __bf16 *rs1, vuint16m2_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2(const __bf16 *rs1, vuint16m4_t rs2, @@ -66,7 +66,7 @@ vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2(const __bf16 *rs1, vuint16m4_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, @@ -78,7 +78,7 @@ vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, @@ -90,7 +90,7 @@ vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m1x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, @@ -101,7 +101,7 @@ vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, @@ -112,7 +112,7 @@ vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg3ei16.c index 1a803532613b2a..5ec3fa5dc2a876 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg3ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf4x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m1x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3(const __bf16 *rs1, vuint16m2_t rs2, @@ -55,7 +55,7 @@ vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3(const __bf16 *rs1, vuint16m2_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf4x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr 
noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, @@ -67,7 +67,7 @@ vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, @@ -79,7 +79,7 @@ vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m1x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, @@ -90,7 +90,7 @@ vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // 
vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg4ei16.c index 181cf80e7a1618..c2308e33763b23 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg4ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf4x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m1x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4(const __bf16 *rs1, vuint16m2_t rs2, @@ -55,7 +55,7 @@ vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4(const __bf16 *rs1, vuint16m2_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf4x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, @@ -67,7 +67,7 @@ vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, @@ -79,7 +79,7 @@ vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m1x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, @@ -90,7 +90,7 @@ vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg5ei16.c index 2af306751af590..586df70881a0b6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg5ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf4x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf2x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16m1x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) 
poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf4x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf2x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16m1x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg6ei16.c index 
f4f95732abdda4..d747a3dd9f9dca 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg6ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf4x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf2x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16m1x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf4x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf2x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16m1x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg7ei16.c index f7b4b267ba4829..6066ccc25fb60e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg7ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf4x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7(const __bf16 *rs1, @@ -22,7 +22,7 @@ 
vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf2x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16m1x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf4x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf2x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 
4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16m1x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg8ei16.c index 26a33b540929b2..1e0a1c2f3f03b1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg8ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf4x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf2x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16m1x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf4x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf2x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16m1x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1, diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg2e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg2e16ff.c index 8218915b893786..5fa5f9d9e4bff0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg2e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg2e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -25,7 +25,7 @@ vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_m(vbool64_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -39,7 +39,7 @@ vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_m(vbool32_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m1x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -53,7 +53,7 @@ vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -67,7 +67,7 @@ vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg3e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg3e16ff.c index 59a87ed655780f..435df4bfd378bc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg3e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg3e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf4x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], 
i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -25,7 +25,7 @@ vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_m(vbool64_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -39,7 +39,7 @@ vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_m(vbool32_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m1x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -53,7 +53,7 @@ vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg4e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg4e16ff.c index fb94ee68b5d448..90519c8d0e0b22 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg4e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg4e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf4x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -25,7 +25,7 @@ vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_m(vbool64_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -39,7 +39,7 @@ vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_m(vbool32_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m1x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -53,7 +53,7 @@ vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg5e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg5e16ff.c index f0e7387f7864dd..0cf9b49f58e22e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg5e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg5e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf4x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -25,7 +25,7 @@ vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_m(vbool64_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf2x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } 
@llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -39,7 +39,7 @@ vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_m(vbool32_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16m1x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg6e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg6e16ff.c index 0fe3fd05d682ea..61ccdcd86c8bda 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg6e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg6e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf4x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -25,7 +25,7 @@ vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_m(vbool64_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf2x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -39,7 +39,7 @@ vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_m(vbool32_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16m1x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg7e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg7e16ff.c index 2679e8ea8b168f..d9e6de09d7ec29 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg7e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg7e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf4x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -25,7 +25,7 @@ vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_m(vbool64_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf2x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -39,7 +39,7 @@ vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_m(vbool32_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16m1x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg8e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg8e16ff.c index 3dc2d5489a7ef8..d68f653872f4b5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg8e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg8e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf4x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -25,7 +25,7 @@ vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_m(vbool64_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf2x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -39,7 +39,7 @@ vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_m(vbool32_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16m1x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg2ei16.c index 64397c5e4d36a7..91138f23a484bb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg2ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei16_v_bf16mf2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m1x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2(const __bf16 *rs1, vuint16m2_t rs2, @@ -55,7 +55,7 @@ vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2(const __bf16 *rs1, vuint16m2_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2(const __bf16 *rs1, vuint16m4_t rs2, @@ -66,7 +66,7 @@ vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2(const __bf16 
*rs1, vuint16m4_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, @@ -78,7 +78,7 @@ vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, @@ -90,7 +90,7 @@ vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m1x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, @@ -101,7 +101,7 @@ vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, @@ -112,7 +112,7 @@ vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg3ei16.c index 9d5d8286440e0d..eccc7bf8277690 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg3ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf4x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x3_t 
test_vluxseg3ei16_v_bf16mf2x3(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m1x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3(const __bf16 *rs1, vuint16m2_t rs2, @@ -55,7 +55,7 @@ vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3(const __bf16 *rs1, vuint16m2_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf4x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, @@ -67,7 +67,7 @@ vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 
[[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, @@ -79,7 +79,7 @@ vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m1x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, @@ -90,7 +90,7 @@ vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg4ei16.c index 3cc14a562b5610..0a8f9fde504b2b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg4ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf4x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m1x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4(const __bf16 *rs1, vuint16m2_t rs2, @@ -55,7 +55,7 @@ vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4(const __bf16 *rs1, vuint16m2_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf4x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, 
@@ -67,7 +67,7 @@ vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, @@ -79,7 +79,7 @@ vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m1x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, @@ -90,7 +90,7 @@ vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg5ei16.c index b9f74744001218..1b38c8baf740e6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg5ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf4x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf2x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16m1x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf4x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
5) @test_vluxseg5ei16_v_bf16mf2x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16m1x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg6ei16.c index 0582eed3d29c6c..194bfcee9ef759 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg6ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf4x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf2x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) 
poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16m1x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf4x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf2x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16m1x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg7ei16.c index c98764ac0bca31..d20db96d60d787 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg7ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf4x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf2x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16m1x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], 
[[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf4x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf2x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16m1x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg8ei16.c index 2cc55d016c375c..6826edda32946e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg8ei16.c @@ -11,7 +11,7 @@ // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf4x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf2x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16m1x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf4x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t 
test_vluxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf2x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16m1x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg2ei16.c index f96a74ae467c5f..710bde9c515953 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg2ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16mf4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16mf4x2(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsoxseg2ei16_v_bf16mf4x2(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16mf2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16mf2x2(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsoxseg2ei16_v_bf16mf2x2(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16m1x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16m1x2(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsoxseg2ei16_v_bf16m1x2(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16m2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16m2x2(__bf16 *rs1, vuint16m2_t vs2, @@ -55,7 +55,7 @@ void test_vsoxseg2ei16_v_bf16m2x2(__bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16m4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16m4x2(__bf16 *rs1, vuint16m4_t vs2, @@ -66,7 +66,7 @@ void test_vsoxseg2ei16_v_bf16m4x2(__bf16 *rs1, vuint16m4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16mf4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, @@ -78,7 +78,7 @@ void test_vsoxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16mf2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, @@ -90,7 +90,7 @@ void test_vsoxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16m1x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, @@ -101,7 +101,7 @@ void test_vsoxseg2ei16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16m2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, @@ -112,7 +112,7 @@ void test_vsoxseg2ei16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16m4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, vuint16m4_t vs2, diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg3ei16.c index 04a2b81fd0ac36..962b1f4f92a9a7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg3ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16mf4x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16mf4x3(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsoxseg3ei16_v_bf16mf4x3(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16mf2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16mf2x3(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsoxseg3ei16_v_bf16mf2x3(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16m1x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16m1x3(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsoxseg3ei16_v_bf16m1x3(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16m2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16m2x3(__bf16 *rs1, vuint16m2_t vs2, @@ -55,7 +55,7 @@ void 
test_vsoxseg3ei16_v_bf16m2x3(__bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16mf4x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, @@ -67,7 +67,7 @@ void test_vsoxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16mf2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, @@ -79,7 +79,7 @@ void test_vsoxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16m1x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, @@ -90,7 +90,7 @@ void test_vsoxseg3ei16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16m2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg4ei16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg4ei16.c index 35b24d5d2027fd..d34f441a2b92a3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg4ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16mf4x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16mf4x4(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsoxseg4ei16_v_bf16mf4x4(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16mf2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16mf2x4(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsoxseg4ei16_v_bf16mf2x4(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16m1x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16m1x4(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsoxseg4ei16_v_bf16m1x4(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16m2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16m2x4(__bf16 *rs1, vuint16m2_t vs2, @@ -55,7 +55,7 @@ void test_vsoxseg4ei16_v_bf16m2x4(__bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg4ei16_v_bf16mf4x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, @@ -67,7 +67,7 @@ void test_vsoxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16mf2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, @@ -79,7 +79,7 @@ void test_vsoxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16m1x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, @@ -90,7 +90,7 @@ void test_vsoxseg4ei16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16m2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg5ei16.c index f79ad6ed381053..a2a0da23de8ab7 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg5ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_bf16mf4x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_bf16mf4x5(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsoxseg5ei16_v_bf16mf4x5(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_bf16mf2x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_bf16mf2x5(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsoxseg5ei16_v_bf16mf2x5(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_bf16m1x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_bf16m1x5(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsoxseg5ei16_v_bf16m1x5(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_bf16mf4x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsoxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_bf16mf2x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], 
target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsoxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_bf16m1x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg6ei16.c index 71a02db956d732..874e2213bc1cdd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg6ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_bf16mf4x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_bf16mf4x6(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsoxseg6ei16_v_bf16mf4x6(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_bf16mf2x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_bf16mf2x6(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsoxseg6ei16_v_bf16mf2x6(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg6ei16_v_bf16m1x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_bf16m1x6(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsoxseg6ei16_v_bf16m1x6(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_bf16mf4x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsoxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_bf16mf2x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsoxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_bf16m1x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg7ei16.c index 1d0e55a4c9b260..092feb27eea362 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg7ei16.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg7ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_bf16mf4x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_bf16mf4x7(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsoxseg7ei16_v_bf16mf4x7(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_bf16mf2x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_bf16mf2x7(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsoxseg7ei16_v_bf16mf2x7(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_bf16m1x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_bf16m1x7(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsoxseg7ei16_v_bf16m1x7(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_bf16mf4x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsoxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_bf16mf2x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsoxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_bf16m1x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg8ei16.c index cc28d61289d010..06b6a374fedc92 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg8ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_bf16mf4x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_bf16mf4x8(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsoxseg8ei16_v_bf16mf4x8(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_bf16mf2x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_bf16mf2x8(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsoxseg8ei16_v_bf16mf2x8(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_bf16m1x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], 
target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_bf16m1x8(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsoxseg8ei16_v_bf16m1x8(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_bf16mf4x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsoxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_bf16mf2x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsoxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_bf16m1x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg2ei16.c index dd2aa78fe43afc..35530a70e04291 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg2ei16.c @@ -11,7 
+11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16mf4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16mf4x2(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsuxseg2ei16_v_bf16mf4x2(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16mf2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16mf2x2(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsuxseg2ei16_v_bf16mf2x2(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16m1x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16m1x2(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsuxseg2ei16_v_bf16m1x2(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16m2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16m2x2(__bf16 *rs1, vuint16m2_t vs2, @@ -55,7 +55,7 @@ void test_vsuxseg2ei16_v_bf16m2x2(__bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16m4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], 
i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16m4x2(__bf16 *rs1, vuint16m4_t vs2, @@ -66,7 +66,7 @@ void test_vsuxseg2ei16_v_bf16m4x2(__bf16 *rs1, vuint16m4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16mf4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, @@ -78,7 +78,7 @@ void test_vsuxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16mf2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, @@ -90,7 +90,7 @@ void test_vsuxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16m1x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, @@ -101,7 +101,7 @@ void test_vsuxseg2ei16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16m2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, @@ -112,7 +112,7 @@ void test_vsuxseg2ei16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16m4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, vuint16m4_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg3ei16.c index d18dd779b0e4f9..a6a88aad089084 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg3ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16mf4x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16mf4x3(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsuxseg3ei16_v_bf16mf4x3(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16mf2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16mf2x3(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsuxseg3ei16_v_bf16mf2x3(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16m1x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16m1x3(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsuxseg3ei16_v_bf16m1x3(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16m2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16m2x3(__bf16 *rs1, vuint16m2_t vs2, @@ -55,7 +55,7 @@ void test_vsuxseg3ei16_v_bf16m2x3(__bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16mf4x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, @@ -67,7 +67,7 @@ void test_vsuxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16mf2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, @@ -79,7 +79,7 @@ void test_vsuxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16m1x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, @@ -90,7 +90,7 @@ 
void test_vsuxseg3ei16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16m2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg4ei16.c index 82b70986cac0d9..0e81863acecaab 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg4ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16mf4x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16mf4x4(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsuxseg4ei16_v_bf16mf4x4(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16mf2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16mf2x4(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsuxseg4ei16_v_bf16mf2x4(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16m1x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg4ei16_v_bf16m1x4(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsuxseg4ei16_v_bf16m1x4(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16m2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16m2x4(__bf16 *rs1, vuint16m2_t vs2, @@ -55,7 +55,7 @@ void test_vsuxseg4ei16_v_bf16m2x4(__bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16mf4x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, @@ -67,7 +67,7 @@ void test_vsuxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16mf2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, @@ -79,7 +79,7 @@ void test_vsuxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16m1x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, @@ -90,7 +90,7 @@ void test_vsuxseg4ei16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16m2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], 
ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg5ei16.c index 376e72e90a76dd..800fef28f29a84 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg5ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_bf16mf4x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_bf16mf4x5(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsuxseg5ei16_v_bf16mf4x5(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_bf16mf2x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_bf16mf2x5(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsuxseg5ei16_v_bf16mf2x5(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_bf16m1x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_bf16m1x5(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsuxseg5ei16_v_bf16m1x5(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg5ei16_v_bf16mf4x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsuxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_bf16mf2x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsuxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_bf16m1x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg6ei16.c index 15815128b9c844..16477ece39f545 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg6ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_bf16mf4x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg6ei16_v_bf16mf4x6(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsuxseg6ei16_v_bf16mf4x6(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_bf16mf2x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_bf16mf2x6(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsuxseg6ei16_v_bf16mf2x6(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_bf16m1x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_bf16m1x6(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsuxseg6ei16_v_bf16m1x6(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_bf16mf4x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsuxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_bf16mf2x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsuxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_bf16m1x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) 
[[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg7ei16.c index 2ba27ec46f267a..40ab7c0350d769 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg7ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_bf16mf4x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_bf16mf4x7(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsuxseg7ei16_v_bf16mf4x7(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_bf16mf2x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_bf16mf2x7(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsuxseg7ei16_v_bf16mf2x7(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_bf16m1x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_bf16m1x7(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsuxseg7ei16_v_bf16m1x7(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_bf16mf4x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef 
[[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsuxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_bf16mf2x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsuxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_bf16m1x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg8ei16.c index c29a17441c5b73..d8276b7fe40a8a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg8ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_bf16mf4x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_bf16mf4x8(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void 
test_vsuxseg8ei16_v_bf16mf4x8(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_bf16mf2x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_bf16mf2x8(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsuxseg8ei16_v_bf16mf2x8(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_bf16m1x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_bf16m1x8(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsuxseg8ei16_v_bf16m1x8(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_bf16mf4x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsuxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_bf16mf2x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsuxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_bf16m1x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei16.c index de6d0fbcdcaaeb..b51d800d6ad7a4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2(const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2(const _Float16 *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2(const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2(const _Float16 *base, vuint16m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2(const float *base, vuint16m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2(const float *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2(const float *base, vuint16m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2(const float *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2(const double *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2(const double *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2(const double *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2(const int8_t *base, vuint16mf2_t 
bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei16_v_i8m1x2(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vloxseg2ei16_v_i8m1x2(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei16_v_i8m2x2(const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vloxseg2ei16_v_i8m2x2(const int8_t *base, vuint16m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei16_v_i8m4x2(const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vloxseg2ei16_v_i8m4x2(const int8_t *base, vuint16m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei16_v_i16m1x2(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vloxseg2ei16_v_i16m1x2(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei16_v_i16m2x2(const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vloxseg2ei16_v_i16m2x2(const int16_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei16_v_i16m4x2(const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vloxseg2ei16_v_i16m4x2(const int16_t *base, vuint16m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei16_v_i32m1x2(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vloxseg2ei16_v_i32m1x2(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei16_v_i32m2x2(const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vloxseg2ei16_v_i32m2x2(const int32_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei16_v_i32m4x2(const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vloxseg2ei16_v_i32m4x2(const int32_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei16_v_i64m1x2(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vloxseg2ei16_v_i64m1x2(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei16_v_i64m2x2(const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vloxseg2ei16_v_i64m2x2(const int64_t *base, vuint16mf2_t binde // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei16_v_i64m4x2(const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vloxseg2ei16_v_i64m4x2(const int64_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2(const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2(const uint8_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2(const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2(const uint8_t *base, vuint16m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2(const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2(const uint16_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2(const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2(const uint16_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2(const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2(const uint32_t *base, 
vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2(const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2(const uint32_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2(const uint64_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2(const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2(const uint64_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_m(vbool4_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_m(vbool8_t mask, const float *base, vuint16m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_m(vbool8_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_m(vbool64_t 
mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_m(vbool16_t mask, const double *base, vuint16m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_m(vbool4_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_m(vbool8_t 
mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_m(vbool16_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t 
test_vloxseg2ei16_v_u32m2x2_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_m(vbool32_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei32.c index acbfa82b8bb2d0..08c15b0220d53a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2(const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2(const _Float16 *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2(const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2(const _Float16 *base, vuint32m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2(const float *base, vuint32m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2(const float *base, vuint32m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2(const float *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2(const float *base, vuint32m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2(const float *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2(const double *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2(const double *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2(const double *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2(const double *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // 
vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei32_v_i8m1x2(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vloxseg2ei32_v_i8m1x2(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei32_v_i8m2x2(const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vloxseg2ei32_v_i8m2x2(const int8_t *base, vuint32m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei32_v_i16m1x2(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16m1x2_t test_vloxseg2ei32_v_i16m1x2(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei32_v_i16m2x2(const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m2x2_t test_vloxseg2ei32_v_i16m2x2(const int16_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei32_v_i16m4x2(const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m4x2_t test_vloxseg2ei32_v_i16m4x2(const int16_t *base, vuint32m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei32_v_i32m1x2(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32m1x2_t test_vloxseg2ei32_v_i32m1x2(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei32_v_i32m2x2(const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m2x2_t test_vloxseg2ei32_v_i32m2x2(const int32_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", 
, 2) @test_vloxseg2ei32_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei32_v_i32m4x2(const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m4x2_t test_vloxseg2ei32_v_i32m4x2(const int32_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei32_v_i64m1x2(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint64m1x2_t test_vloxseg2ei32_v_i64m1x2(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei32_v_i64m2x2(const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m2x2_t test_vloxseg2ei32_v_i64m2x2(const int64_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t 
test_vloxseg2ei32_v_i64m4x2(const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m4x2_t test_vloxseg2ei32_v_i64m4x2(const int64_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2(const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2(const uint8_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2(const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2(const uint16_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2(const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2(const uint16_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2(const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2(const uint32_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2(const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2(const uint32_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) 
[[TMP0]] // vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2(const uint64_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2(const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2(const uint64_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, 
i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_m(vbool4_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_m(vbool8_t mask, const float *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_m(vbool8_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_m(vbool16_t mask, const double *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_m(vbool16_t mask, 
const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_m(vbool4_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_m(vbool16_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_m(vbool16_t 
mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, 
i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_m(vbool32_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei64.c index 3de2eaf48bcb3e..f031fd3702962a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2(const _Float16 *base, vuint64m4_t 
bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2(const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2(const _Float16 *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2(const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2(const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2(const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2(const float *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2(const float *base, vuint64m8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2(const float *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2(const double *base, vuint64m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2(const double *base, vuint64m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2(const double *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2(const double *base, vuint64m4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2(const double *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei64_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei64_v_i8m1x2(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8m1x2_t test_vloxseg2ei64_v_i8m1x2(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t 
test_vloxseg2ei64_v_i16m1x2(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m1x2_t test_vloxseg2ei64_v_i16m1x2(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei64_v_i16m2x2(const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16m2x2_t test_vloxseg2ei64_v_i16m2x2(const int16_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei64_v_i32m1x2(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m1x2_t test_vloxseg2ei64_v_i32m1x2(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei64_v_i32m2x2(const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint32m2x2_t test_vloxseg2ei64_v_i32m2x2(const int32_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei64_v_i32m4x2(const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32m4x2_t test_vloxseg2ei64_v_i32m4x2(const int32_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei64_v_i64m1x2(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint64m1x2_t test_vloxseg2ei64_v_i64m1x2(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei64_v_i64m2x2(const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint64m2x2_t test_vloxseg2ei64_v_i64m2x2(const int64_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei64_v_i64m4x2(const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint64m4x2_t test_vloxseg2ei64_v_i64m4x2(const int64_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei64_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t 
test_vloxseg2ei64_v_u16m1x2(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2(const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2(const uint16_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2(const uint32_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2(const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2(const uint32_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2(const uint64_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2(const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2(const uint64_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_m(vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_m(vbool8_t mask, const float *base, vuint64m8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_m(vbool8_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_m(vbool16_t mask, const double *base, vuint64m4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_m(vbool32_t mask, 
const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", 
, 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t 
test_vloxseg2ei64_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_m(vbool8_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_m(vbool16_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 
2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei64_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) 
{ @@ -790,7 +790,7 @@ vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_m(vbool32_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei8.c index bb70a3d9d1b8b3..0737c6f85bc1e5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2(const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2(const _Float16 *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2(const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2(const _Float16 *base, vuint8m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2(const float *base, vuint8mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2(const float *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2(const float *base, vuint8m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2(const float *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei8_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2(const double *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2(const double *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2(const double *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2(const double *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2(const int8_t 
*base, vuint8mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei8_v_i8m1x2(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vloxseg2ei8_v_i8m1x2(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei8_v_i8m2x2(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vloxseg2ei8_v_i8m2x2(const int8_t *base, vuint8m2_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei8_v_i8m4x2(const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vloxseg2ei8_v_i8m4x2(const int8_t *base, vuint8m4_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei8_v_i16m1x2(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vloxseg2ei8_v_i16m1x2(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei8_v_i16m2x2(const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vloxseg2ei8_v_i16m2x2(const int16_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei8_v_i16m4x2(const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vloxseg2ei8_v_i16m4x2(const int16_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei8_v_i32m1x2(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vloxseg2ei8_v_i32m1x2(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei8_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei8_v_i32m2x2(const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vloxseg2ei8_v_i32m2x2(const int32_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei8_v_i32m4x2(const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vloxseg2ei8_v_i32m4x2(const int32_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei8_v_i64m1x2(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vloxseg2ei8_v_i64m1x2(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei8_v_i64m2x2(const int64_t 
*base, vuint8mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vloxseg2ei8_v_i64m2x2(const int64_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei8_v_i64m4x2(const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vloxseg2ei8_v_i64m4x2(const int64_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2(const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2(const uint8_t *base, vuint8m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2(const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2(const uint8_t *base, vuint8m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2(const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2(const uint16_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei8_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2(const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2(const uint16_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2(const 
uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2(const uint32_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2(const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2(const uint32_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2(const uint64_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2(const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2(const uint64_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_m(vbool16_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_m(vbool8_t mask, const float *base, vuint8m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_m(vbool8_t mask, const float *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_m(vbool64_t mask, const double *base, vuint8mf8_t 
bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_m(vbool16_t mask, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_m(vbool16_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_m(vbool8_t mask, const int32_t 
*base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_m(vbool32_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_m(vbool16_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_m(vbool8_t mask, const uint8_t 
*base, vuint8m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei16.c index bdd978827ec76a..4b1cc342e9581b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3(const _Float16 *base, vuint16m1_t bindex, 
size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3(const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3(const _Float16 *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3(const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3(const float *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3(const double *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei16_v_i8m1x3(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei16_v_i8m1x3(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei16_v_i8m2x3(const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vloxseg3ei16_v_i8m2x3(const int8_t *base, vuint16m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei16_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei16_v_i16m1x3(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vloxseg3ei16_v_i16m1x3(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t 
test_vloxseg3ei16_v_i16m2x3(const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vloxseg3ei16_v_i16m2x3(const int16_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei16_v_i32m1x3(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vloxseg3ei16_v_i32m1x3(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei16_v_i32m2x3(const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vloxseg3ei16_v_i32m2x3(const int32_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei16_v_i64m1x3(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vloxseg3ei16_v_i64m1x3(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei16_v_i64m2x3(const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vloxseg3ei16_v_i64m2x3(const int64_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3(const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3(const uint8_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
3) @test_vloxseg3ei16_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3(const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3(const uint16_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t 
test_vloxseg3ei16_v_u32mf2x3(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3(const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3(const uint32_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3(const uint64_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_m(vbool16_t mask, const _Float16 *bas // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t 
test_vloxseg3ei16_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei16_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ 
-650,7 +650,7 @@ vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei32.c index 103ab7ffc06fd3..98fad9d89fb046 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3(const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3(const _Float16 *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3(const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3(const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3(const float *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3(const double *base, vuint32m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3(const double *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei32_v_i8m1x3(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei32_v_i8m1x3(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei32_v_i8m2x3(const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vloxseg3ei32_v_i8m2x3(const int8_t *base, vuint32m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t 
test_vloxseg3ei32_v_i16mf4x3(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei32_v_i16m1x3(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vloxseg3ei32_v_i16m1x3(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei32_v_i16m2x3(const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vloxseg3ei32_v_i16m2x3(const int16_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei32_v_i32m1x3(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vloxseg3ei32_v_i32m1x3(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei32_v_i32m2x3(const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vloxseg3ei32_v_i32m2x3(const int32_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei32_v_i64m1x3(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vloxseg3ei32_v_i64m1x3(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei32_v_i64m2x3(const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vloxseg3ei32_v_i64m2x3(const int64_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei32_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3(const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3(const uint8_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t 
test_vloxseg3ei32_v_u16mf2x3(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3(const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3(const uint16_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3(const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3(const uint32_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3(const uint64_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m1x3_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t 
test_vloxseg3ei32_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t 
test_vloxseg3ei32_v_u16m1x3_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei64.c index 2168d23bde06ab..45b18229a883a4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3(const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3(const _Float16 *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
3) @test_vloxseg3ei64_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3(const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3(const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3(const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3(const float *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t 
test_vloxseg3ei64_v_f64m1x3(const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3(const double *base, vuint64m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3(const double *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei64_v_i8m1x3(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei64_v_i8m1x3(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei64_v_i16m1x3(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x3_t test_vloxseg3ei64_v_i16m1x3(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei64_v_i16m2x3(const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x3_t test_vloxseg3ei64_v_i16m2x3(const int16_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei64_v_i32m1x3(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32m1x3_t test_vloxseg3ei64_v_i32m1x3(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei64_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei64_v_i32m2x3(const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x3_t test_vloxseg3ei64_v_i32m2x3(const int32_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei64_v_i64m1x3(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x3_t test_vloxseg3ei64_v_i64m1x3(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei64_v_i64m2x3(const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x3_t test_vloxseg3ei64_v_i64m2x3(const int64_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t 
test_vloxseg3ei64_v_u8mf8x3(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3(const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3(const uint16_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3(const uint32_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3(const uint64_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x3_t 
test_vloxseg3ei64_v_f32m1x3_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_m(vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_m(vbool64_t mask, 
const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 
4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei8.c index 28764d6336c15f..1713de8b096f49 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3(const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3(const _Float16 *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei8_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3(const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3(const float *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3(const double *base, 
vuint8mf4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3(const double *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei8_v_i8m1x3(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei8_v_i8m1x3(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei8_v_i8m2x3(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vloxseg3ei8_v_i8m2x3(const int8_t *base, vuint8m2_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei8_v_i16m1x3(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vloxseg3ei8_v_i16m1x3(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei8_v_i16m2x3(const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vloxseg3ei8_v_i16m2x3(const int16_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei8_v_i32m1x3(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vloxseg3ei8_v_i32m1x3(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei8_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei8_v_i32m2x3(const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vloxseg3ei8_v_i32m2x3(const int32_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei8_v_i64m1x3(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vloxseg3ei8_v_i64m1x3(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei8_v_i64m2x3(const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vloxseg3ei8_v_i64m2x3(const int64_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3(const uint8_t 
*base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3(const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3(const uint8_t *base, vuint8m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3(const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3(const uint16_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3(const uint32_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei8_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3(const uint64_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei8_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_m(vbool16_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t 
test_vloxseg3ei8_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: 
ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_m(vbool16_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_m(vbool32_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_m(vbool64_t mask, const 
uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_m(vbool64_t 
mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei16.c index 2b70ba4e781d80..acdba231e3f9d2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m1x4 
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4(const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4(const _Float16 *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4(const float *base, 
vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4(const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4(const float *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4(const double *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei16_v_i8m1x4(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei16_v_i8m1x4(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei16_v_i8m2x4(const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vloxseg4ei16_v_i8m2x4(const int8_t *base, vuint16m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei16_v_i16m1x4(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vloxseg4ei16_v_i16m1x4(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei16_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei16_v_i16m2x4(const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vloxseg4ei16_v_i16m2x4(const int16_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei16_v_i32m1x4(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vloxseg4ei16_v_i32m1x4(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t 
test_vloxseg4ei16_v_i32m2x4(const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vloxseg4ei16_v_i32m2x4(const int32_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei16_v_i64m1x4(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vloxseg4ei16_v_i64m1x4(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei16_v_i64m2x4(const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vloxseg4ei16_v_i64m2x4(const int64_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4(const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4(const uint8_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4(const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4(const uint16_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4(const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4(const uint32_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // 
vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4(const uint64_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 
5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_m(vbool32_t 
mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t 
test_vloxseg4ei16_v_u32mf2x4_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei32.c index 7395b9e72fc678..ed0fdc8d175715 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4(const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4(const _Float16 *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4(const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4(const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4(const float *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4(const double *base, vuint32m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4(const double *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei32_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei32_v_i8m1x4(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei32_v_i8m1x4(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei32_v_i8m2x4(const 
int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vloxseg4ei32_v_i8m2x4(const int8_t *base, vuint32m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei32_v_i16m1x4(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vloxseg4ei32_v_i16m1x4(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei32_v_i16m2x4(const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vloxseg4ei32_v_i16m2x4(const int16_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei32_v_i32m1x4(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vloxseg4ei32_v_i32m1x4(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei32_v_i32m2x4(const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vloxseg4ei32_v_i32m2x4(const int32_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei32_v_i64m1x4(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vloxseg4ei32_v_i64m1x4(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei32_v_i64m2x4(const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vloxseg4ei32_v_i64m2x4(const int64_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei32_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4(const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4(const uint8_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t 
test_vloxseg4ei32_v_u16mf4x4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4(const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4(const uint16_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4(const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4(const uint32_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4(const uint64_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_m(vbool8_t mask, const int8_t 
*base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", 
, 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t 
test_vloxseg4ei32_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", 
, 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei32_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) 
{ diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei64.c index 51f073a75a7a0d..fc3bdf1b4fbab3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4(const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4(const _Float16 *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4(const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4(const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4(const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4(const float *base, vuint64m4_t bindex // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4(const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4(const double *base, vuint64m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4(const double *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) 
[[TMP0]] // vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei64_v_i8m1x4(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei64_v_i8m1x4(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei64_v_i16m1x4(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x4_t test_vloxseg4ei64_v_i16m1x4(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei64_v_i16m2x4(const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x4_t test_vloxseg4ei64_v_i16m2x4(const int16_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei64_v_i32m1x4(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32m1x4_t test_vloxseg4ei64_v_i32m1x4(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei64_v_i32m2x4(const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x4_t test_vloxseg4ei64_v_i32m2x4(const int32_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei64_v_i64m1x4(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x4_t test_vloxseg4ei64_v_i64m1x4(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei64_v_i64m2x4(const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x4_t test_vloxseg4ei64_v_i64m2x4(const int64_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // 
vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4(const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4(const uint16_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4(const uint32_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4(const uint64_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t 
test_vloxseg4ei64_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_m(vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_m(vbool16_t mask, 
const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x4_t 
test_vloxseg4ei64_v_u16m1x4_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei8.c index c5a0d137c3866f..88788823c84a24 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) 
poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4(const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4(const _Float16 *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4(const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4(const float *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4(const 
double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4(const double *base, vuint8mf4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4(const double *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei8_v_i8m1x4(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei8_v_i8m1x4(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei8_v_i8m2x4(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vloxseg4ei8_v_i8m2x4(const int8_t *base, vuint8m2_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei8_v_i16m1x4(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vloxseg4ei8_v_i16m1x4(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei8_v_i16m2x4(const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vloxseg4ei8_v_i16m2x4(const int16_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei8_v_i32m1x4(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vloxseg4ei8_v_i32m1x4(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei8_v_i32m2x4(const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vloxseg4ei8_v_i32m2x4(const int32_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei8_v_i64m1x4(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vloxseg4ei8_v_i64m1x4(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei8_v_i64m2x4(const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vloxseg4ei8_v_i64m2x4(const int64_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t 
test_vloxseg4ei8_v_u8m1x4(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4(const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4(const uint8_t *base, vuint8m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4(const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4(const uint16_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4(const uint32_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4(const uint64_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_m(vbool64_t mask, const 
_Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_m(vbool16_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_m(vbool64_t 
mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 
4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_m(vbool16_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_m(vbool32_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_m(vbool32_t mask, const uint8_t 
*base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_m(vbool8_t mask, const 
uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei16.c index b739b8b1eb1ea8..7e94ffffb19838 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5(const int8_t *base, vuint16m1_t bindex, // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei16_v_i8m1x5(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei16_v_i8m1x5(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei16_v_i16m1x5(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei16_v_i16m1x5(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei16_v_i32m1x5(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei16_v_i32m1x5(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei16_v_i64m1x5(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei16_v_i64m1x5(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5(const uint32_t *base, vuint16mf4_t b // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t 
test_vloxseg5ei16_v_f32m1x5_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t 
test_vloxseg5ei16_v_u16mf2x5_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei32.c index 5a526c8839b3c7..a6129b9a7c3783 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei32_v_i8m1x5(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei32_v_i8m1x5(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vloxseg5ei32_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei32_v_i16m1x5(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei32_v_i16m1x5(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t 
test_vloxseg5ei32_v_i32mf2x5(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei32_v_i32m1x5(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei32_v_i32m1x5(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei32_v_i64m1x5(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei32_v_i64m1x5(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t 
test_vloxseg5ei32_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t 
test_vloxseg5ei32_v_u16m1x5_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei64.c index 49c6e04f5daa20..77c72e14f53a27 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei64.c @@ -10,7 +10,7 @@ // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei64_v_i8m1x5(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei64_v_i8m1x5(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei64_v_i16m1x5(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei64_v_i16m1x5(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei64_v_i32m1x5(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei64_v_i32m1x5(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei64_v_i64m1x5(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei64_v_i64m1x5(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // 
vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", 
, 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vloxseg5ei64_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ 
vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei8.c index 044281b729f23f..5b68d045161aa2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vloxseg5ei8_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5(const int8_t *base, 
vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei8_v_i8m1x5(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei8_v_i8m1x5(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei8_v_i16m1x5(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei8_v_i16m1x5(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei8_v_i32m1x5(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei8_v_i32m1x5(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei8_v_i64m1x5(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei8_v_i64m1x5(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vloxseg5ei8_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5(const 
uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, 
size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_m(vbool32_t mask, const 
uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei16.c index be6b1bbebbb931..d91105046d96c8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei16_v_i8m1x6(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei16_v_i8m1x6(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei16_v_i16m1x6(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei16_v_i16m1x6(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6(const int32_t *base, vuint16mf4_t bin 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei16_v_i32m1x6(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei16_v_i32m1x6(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei16_v_i64m1x6(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei16_v_i64m1x6(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf8x6_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t 
test_vloxseg6ei16_v_i16m1x6_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei32.c index c38fe1a7074759..dd96b82b754be0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6(const 
float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei32_v_i8m1x6(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei32_v_i8m1x6(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei32_v_i16m1x6(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei32_v_i16m1x6(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei32_v_i32m1x6(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei32_v_i32m1x6(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei32_v_i64m1x6(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei32_v_i64m1x6(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6(const uint8_t *base, vuint32m2_t binde 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_m(vbool64_t mask, 
const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32m1x6_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t 
test_vloxseg6ei32_v_u16mf4x6_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei64.c index 93cf75713af6b1..ce9cf6ed9fa09e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei64_v_i8m1x6(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei64_v_i8m1x6(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // 
vint16m1x6_t test_vloxseg6ei64_v_i16m1x6(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei64_v_i16m1x6(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei64_v_i32m1x6(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei64_v_i32m1x6(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei64_v_i64m1x6(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei64_v_i64m1x6(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t 
test_vloxseg6ei64_v_f32m1x6_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t 
test_vloxseg6ei64_v_u16mf2x6_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei8.c index 6e331cc1b72e20..3e7fa1893cbe67 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei8_v_i8m1x6(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei8_v_i8m1x6(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf4x6 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei8_v_i16m1x6(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei8_v_i16m1x6(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6(const int32_t *base, vuint8mf8_t bindex, 
size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei8_v_i32m1x6(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei8_v_i32m1x6(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei8_v_i64m1x6(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei8_v_i64m1x6(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 
6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf8x6_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_m(vbool16_t mask, const int16_t 
*base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_m(vbool8_t mask, const uint8_t 
*base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei16.c index 7db0e894c85047..16f6133a05f420 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vloxseg7ei16_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7(const 
int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei16_v_i8m1x7(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei16_v_i8m1x7(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei16_v_i16m1x7(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei16_v_i16m1x7(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei16_v_i32m1x7(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei16_v_i32m1x7(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei16_v_i64m1x7(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei16_v_i64m1x7(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
7) @test_vloxseg7ei16_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t 
test_vloxseg7ei16_v_u16m1x7(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ 
-310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_m(vbool64_t 
mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei32.c index 66b6ecc6bc9002..6fb0e7c9d647cd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei32_v_i8m1x7(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei32_v_i8m1x7(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // 
vint16m1x7_t test_vloxseg7ei32_v_i16m1x7(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei32_v_i16m1x7(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei32_v_i32m1x7(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei32_v_i32m1x7(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei32_v_i64m1x7(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei32_v_i64m1x7(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t 
test_vloxseg7ei32_v_f32m1x7_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t 
test_vloxseg7ei32_v_u16mf2x7_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei64.c index 9fa71b6c778a93..9f8f5b19b58ffa 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei64_v_i8m1x7(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei64_v_i8m1x7(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vloxseg7ei64_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei64_v_i16m1x7(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei64_v_i16m1x7(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t 
test_vloxseg7ei64_v_i32mf2x7(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei64_v_i32m1x7(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei64_v_i32m1x7(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei64_v_i64m1x7(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei64_v_i64m1x7(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t 
test_vloxseg7ei64_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t 
test_vloxseg7ei64_v_u16m1x7_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei8.c index ff6d8ef7d69684..dec7a0dcd4c98c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei8.c @@ -10,7 +10,7 @@ // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei8_v_i8m1x7(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei8_v_i8m1x7(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei8_v_i16m1x7(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei8_v_i16m1x7(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei8_v_i32m1x7(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei8_v_i32m1x7(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei8_v_i64m1x7(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei8_v_i64m1x7(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t 
test_vloxseg7ei8_v_u8mf2x7(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_m(vbool64_t mask, const 
int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_m(vbool64_t mask, const uint16_t 
*base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
 //
 vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_m(vbool32_t mask, const uint32_t *base,
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u64m1x7_m
 // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
 //
 vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei16.c
index 17db14389e21e8..47e75270b22c90 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei16.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf4x8
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]]
 //
 vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8(const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8(const _Float16 *base, vuint16mf4_t
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf2x8
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+//
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8(const int8_t *base, vuint16m1_t bindex, // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei16_v_i8m1x8(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei16_v_i8m1x8(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei16_v_i16m1x8(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei16_v_i16m1x8(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei16_v_i32m1x8(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei16_v_i32m1x8(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei16_v_i64m1x8(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei16_v_i64m1x8(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8(const uint32_t *base, vuint16mf4_t b // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t 
test_vloxseg8ei16_v_f32m1x8_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t 
test_vloxseg8ei16_v_u16mf2x8_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]]
 //
 vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei32.c
index 31a311cf5294ea..3ae4b67064a83a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei32.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf4x8
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]]
 //
 vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8(const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8(const _Float16 *base, vuint32mf2_t
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf2x8
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]]
 //
 vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8(const _Float16 *base, vuint32m1_t bindex, size_t vl) {
@@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8(const _Float16 *base, vuint32m1_t b
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16m1x8
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8)
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei32_v_i8m1x8(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei32_v_i8m1x8(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vloxseg8ei32_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei32_v_i16m1x8(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei32_v_i16m1x8(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t 
test_vloxseg8ei32_v_i32mf2x8(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei32_v_i32m1x8(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei32_v_i32m1x8(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei32_v_i64m1x8(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei32_v_i64m1x8(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t 
test_vloxseg8ei32_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t 
test_vloxseg8ei32_v_u16m1x8_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei64.c index ce1fd5ed1d6456..bd3754b67c7002 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei64.c @@ -10,7 +10,7 @@ // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei64_v_i8m1x8(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei64_v_i8m1x8(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei64_v_i16m1x8(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei64_v_i16m1x8(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei64_v_i32m1x8(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei64_v_i32m1x8(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei64_v_i64m1x8(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei64_v_i64m1x8(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // 
vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", 
, 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vloxseg8ei64_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ 
vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei8.c index baf672a8787062..f8c8e1d1c6e1b9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vloxseg8ei8_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8(const int8_t *base, 
vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei8_v_i8m1x8(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei8_v_i8m1x8(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei8_v_i16m1x8(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei8_v_i16m1x8(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei8_v_i32m1x8(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei8_v_i32m1x8(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei8_v_i64m1x8(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei8_v_i64m1x8(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vloxseg8ei8_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8(const 
uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, 
size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_m(vbool32_t mask, const 
uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e16ff.c index a033e06c457998..2a349939a3987b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_m(vbool16_t mask, const int16_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_m(vbool8_t mask, const int16_t *base, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_m(vbool4_t mask, const int16_t *base, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e32ff.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e32ff.c index 3ec0991f4c59fb..b1bec34c098cbd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_m(vbool32_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store 
i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_m(vbool16_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_m(vbool8_t mask, const float *base, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_m(vbool32_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vlseg2e32ff_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_m(vbool16_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_m(vbool8_t mask, const int32_t *base, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e64ff.c index a609c0f3e62f69..b48da19001a545 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_m(vbool64_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_m(vbool32_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_m(vbool16_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e8ff.c index b52c263d28892a..5bb88304b1f768 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_m(vbool8_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_m(vbool4_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_m(vbool2_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, size // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, size // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e16ff.c index f412a3be51cfbb..8acb2561502509 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 
8 @@ -62,7 +62,7 @@ vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_m(vbool16_t mask, const int16_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m2x3_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_m(vbool8_t mask, const int16_t *base, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e32ff.c index a56c15ad537c90..448f621c6e9544 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_m(vbool32_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_m(vbool16_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_m(vbool32_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_m(vbool16_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e64ff.c index 70ed14c9910f4c..a4716a31320941 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_m(vbool64_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_m(vbool32_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e8ff.c index 7395bbd2649b88..855658609e3ae0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_m(vbool8_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_m(vbool4_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, size // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e16ff.c index 001a750df5ea5e..95899e8f9cc58f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 
@@ -101,7 +101,7 @@ vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_m(vbool16_t mask, const int16_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_m(vbool8_t mask, const int16_t *base, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m1x4_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e32ff.c index 3b88462c6605ad..3fe6d365d54a7b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x4_t 
test_vlseg4e32ff_v_f32mf2x4_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_m(vbool32_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_m(vbool16_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_m(vbool32_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_m(vbool16_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e64ff.c index ac9d69b6642794..6c5434f16ec589 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_m(vbool64_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_m(vbool32_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e8ff.c index 2a2aebb2500a4a..510dc816475333 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_m(vbool8_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_m(vbool4_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, size // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e16ff.c index 627c7fc71ef64d..2b6a048e46f2a0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } 
@llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_m(vbool16_t mask, const int16_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e32ff.c index 9772c4956e99b2..b386f0451fd0bb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } 
@llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_m(vbool32_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_m(vbool32_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e64ff.c index 2af70f1612a215..93463e0b0039e5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } 
@llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_m(vbool64_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e8ff.c index dfa5c265cf809a..f24120977a1639 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr 
[[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", 
, 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_m(vbool8_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e16ff.c index 08e2b267bee984..8ab64ec49e89a7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_m(vbool16_t mask, const int16_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e32ff.c index 26eac4e239b36f..c68013e07e8bc7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 
@@ -36,7 +36,7 @@ vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_m(vbool32_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_m(vbool32_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32m1x6_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e64ff.c index 804482e9fcd018..ef0dd6374f782d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_m(vbool64_t 
mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e8ff.c index f2983108ab7e64..606b27a588f285 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_m(vbool8_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e16ff.c index b93a5b71ef81ae..e2e7e21ba0a5a7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e16ff.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf4x7_m 
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_m(vbool16_t mask, const int16_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e32ff.c index bff80fd0c00507..f550e5f3bf87bc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_m(vbool32_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } 
@llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_m(vbool32_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e64ff.c index 2bcf4fdb1ec838..74ed16b165ee33 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_m(vbool64_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e8ff.c index 24d96adb44d9af..476b1a44765cc4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vlseg7e8ff_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_m(vbool8_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e16ff.c index 8c4511912763b8..659b7776a838ce 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } 
@llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_m(vbool16_t mask, const int16_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e32ff.c index 9f8106c5590892..589735278d0237 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } 
@llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_m(vbool32_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_m(vbool32_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e64ff.c index dde642f10d2f32..9cedb2066c0f46 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } 
@llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_m(vbool64_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e8ff.c index eb410763c10c5f..ccdc2ebbca42b3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr 
[[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_m(vbool8_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", 
, 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei16.c index 660394affe4818..29f3bf60bf8aeb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2(const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2(const _Float16 *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2(const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2(const _Float16 *base, vuint16m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2(const float *base, vuint16mf2_t bindex, size_t vl) { @@ 
-80,7 +80,7 @@ vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2(const float *base, vuint16m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2(const float *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2(const float *base, vuint16m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2(const float *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", 
, 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2(const double *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2(const double *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2(const double *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei16_v_i8m1x2(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vluxseg2ei16_v_i8m1x2(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei16_v_i8m2x2(const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vluxseg2ei16_v_i8m2x2(const int8_t *base, vuint16m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei16_v_i8m4x2(const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vluxseg2ei16_v_i8m4x2(const int8_t *base, vuint16m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei16_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei16_v_i16m1x2(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vluxseg2ei16_v_i16m1x2(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t 
test_vluxseg2ei16_v_i16m2x2(const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vluxseg2ei16_v_i16m2x2(const int16_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei16_v_i16m4x2(const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vluxseg2ei16_v_i16m4x2(const int16_t *base, vuint16m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei16_v_i32m1x2(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vluxseg2ei16_v_i32m1x2(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei16_v_i32m2x2(const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vluxseg2ei16_v_i32m2x2(const int32_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei16_v_i32m4x2(const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vluxseg2ei16_v_i32m4x2(const int32_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei16_v_i64m1x2(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vluxseg2ei16_v_i64m1x2(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei16_v_i64m2x2(const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vluxseg2ei16_v_i64m2x2(const int64_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei16_v_i64m4x2(const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vluxseg2ei16_v_i64m4x2(const int64_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei16_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2(const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2(const uint8_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2(const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2(const uint8_t *base, vuint16m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t 
test_vluxseg2ei16_v_u16mf4x2(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2(const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2(const uint16_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2(const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2(const uint16_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2(const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2(const uint32_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2(const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2(const uint32_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2(const uint64_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2(const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2(const uint64_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_m(vbool4_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_m(vbool32_t 
mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_m(vbool8_t mask, const float *base, vuint16m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_m(vbool8_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_m(vbool16_t mask, const double *base, vuint16m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m4x2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_m(vbool4_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t 
test_vluxseg2ei16_v_i32mf2x2_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_m(vbool8_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_m(vbool16_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t 
test_vluxseg2ei16_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_m(vbool32_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 
6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei32.c index 297609ed5712f4..19690a05aa3630 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2(const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2(const _Float16 *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2(const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2(const _Float16 *base, vuint32m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // 
vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2(const float *base, vuint32m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2(const float *base, vuint32m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2(const float *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2(const float *base, vuint32m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2(const float *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2(const double *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2(const double *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2(const double *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2(const double *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei32_v_i8m1x2(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vluxseg2ei32_v_i8m1x2(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei32_v_i8m2x2(const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vluxseg2ei32_v_i8m2x2(const int8_t *base, vuint32m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei32_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei32_v_i16m1x2(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16m1x2_t test_vluxseg2ei32_v_i16m1x2(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei32_v_i16m2x2(const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m2x2_t test_vluxseg2ei32_v_i16m2x2(const int16_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t 
test_vluxseg2ei32_v_i16m4x2(const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m4x2_t test_vluxseg2ei32_v_i16m4x2(const int16_t *base, vuint32m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei32_v_i32m1x2(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32m1x2_t test_vluxseg2ei32_v_i32m1x2(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei32_v_i32m2x2(const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m2x2_t test_vluxseg2ei32_v_i32m2x2(const int32_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei32_v_i32m4x2(const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m4x2_t test_vluxseg2ei32_v_i32m4x2(const int32_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei32_v_i64m1x2(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint64m1x2_t test_vluxseg2ei32_v_i64m1x2(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei32_v_i64m2x2(const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m2x2_t test_vluxseg2ei32_v_i64m2x2(const int64_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei32_v_i64m4x2(const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m4x2_t test_vluxseg2ei32_v_i64m4x2(const int64_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei32_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2(const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2(const uint8_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t 
test_vluxseg2ei32_v_u16m1x2(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2(const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2(const uint16_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2(const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2(const uint16_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2(const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2(const uint32_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2(const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2(const uint32_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2(const uint64_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2(const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2(const uint64_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_m(vbool32_t 
mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_m(vbool4_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_m(vbool8_t mask, const float *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_m(vbool8_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_m(vbool16_t mask, const double *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf8x2_m 
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16mf2x2_t 
test_vluxseg2ei32_v_i16mf2x2_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_m(vbool4_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_m(vbool16_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint16mf2x2_t 
test_vluxseg2ei32_v_u16mf2x2_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 
5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_m(vbool32_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei64.c index e61165065d9d96..35aa11afa0388f 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2(const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2(const _Float16 *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2(const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2(const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2(const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2(const float *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2(const float *base, vuint64m8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2(const float *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2(const double *base, vuint64m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2(const double *base, vuint64m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2(const double *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2(const double *base, vuint64m4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2(const double *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t 
test_vluxseg2ei64_v_i8m1x2(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8m1x2_t test_vluxseg2ei64_v_i8m1x2(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei64_v_i16m1x2(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m1x2_t test_vluxseg2ei64_v_i16m1x2(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei64_v_i16m2x2(const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16m2x2_t test_vluxseg2ei64_v_i16m2x2(const int16_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei64_v_i32m1x2(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m1x2_t test_vluxseg2ei64_v_i32m1x2(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei64_v_i32m2x2(const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint32m2x2_t test_vluxseg2ei64_v_i32m2x2(const int32_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei64_v_i32m4x2(const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32m4x2_t test_vluxseg2ei64_v_i32m4x2(const int32_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei64_v_i64m1x2(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint64m1x2_t test_vluxseg2ei64_v_i64m1x2(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei64_v_i64m2x2(const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint64m2x2_t test_vluxseg2ei64_v_i64m2x2(const int64_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei64_v_i64m4x2(const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint64m4x2_t test_vluxseg2ei64_v_i64m4x2(const int64_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei64_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t 
test_vluxseg2ei64_v_u8m1x2(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2(const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2(const uint16_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2(const uint32_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2(const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2(const uint32_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2(const uint64_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2(const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2(const uint64_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_m(vbool16_t mask, 
const float *base, vuint64m4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_m(vbool8_t mask, const float *base, vuint64m8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_m(vbool8_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_m(vbool16_t mask, const double *base, vuint64m4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m1x2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_m(vbool8_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint64m1x2_t 
test_vluxseg2ei64_v_i64m1x2_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_m(vbool16_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_m(vbool32_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) { diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei8.c index 0e8dad08240049..0536770cbef438 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2(const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2(const _Float16 *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2(const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2(const _Float16 *base, vuint8m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2(const float *base, vuint8mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2(const float *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2(const float *base, vuint8m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2(const float *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t 
test_vluxseg2ei8_v_f64m2x2(const double *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2(const double *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2(const double *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2(const double *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei8_v_i8m1x2(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vluxseg2ei8_v_i8m1x2(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei8_v_i8m2x2(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vluxseg2ei8_v_i8m2x2(const int8_t *base, vuint8m2_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei8_v_i8m4x2(const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vluxseg2ei8_v_i8m4x2(const int8_t *base, vuint8m4_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei8_v_i16m1x2(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vluxseg2ei8_v_i16m1x2(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei8_v_i16m2x2(const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vluxseg2ei8_v_i16m2x2(const int16_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei8_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei8_v_i16m4x2(const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vluxseg2ei8_v_i16m4x2(const int16_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei8_v_i32m1x2(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vluxseg2ei8_v_i32m1x2(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei8_v_i32m2x2(const int32_t 
*base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vluxseg2ei8_v_i32m2x2(const int32_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei8_v_i32m4x2(const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vluxseg2ei8_v_i32m4x2(const int32_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei8_v_i64m1x2(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vluxseg2ei8_v_i64m1x2(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei8_v_i64m2x2(const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vluxseg2ei8_v_i64m2x2(const int64_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei8_v_i64m4x2(const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vluxseg2ei8_v_i64m4x2(const int64_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2(const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2(const uint8_t *base, vuint8m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2(const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2(const uint8_t *base, vuint8m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei8_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2(const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2(const uint16_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2(const 
uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2(const uint16_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2(const uint32_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2(const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2(const uint32_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2(const uint64_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2(const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2(const uint64_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m2x2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_m(vbool16_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_m(vbool8_t mask, const float *base, vuint8m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_m(vbool8_t mask, const float *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_m(vbool16_t mask, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_m(vbool32_t mask, const int8_t 
*base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_m(vbool16_t mask, const int16_t *base, 
vuint8mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_m(vbool16_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_m(vbool32_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_m(vbool16_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_m(vbool32_t mask, 
const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t 
test_vluxseg2ei8_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei16.c index 7beec8b207da18..07300daf7b1932 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei16.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3(const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3(const _Float16 *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3(const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3(const float *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3(const double *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei16_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei16_v_i8m1x3(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei16_v_i8m1x3(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei16_v_i8m2x3(const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vluxseg3ei16_v_i8m2x3(const int8_t *base, vuint16m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3(const 
int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei16_v_i16m1x3(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vluxseg3ei16_v_i16m1x3(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei16_v_i16m2x3(const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vluxseg3ei16_v_i16m2x3(const int16_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei16_v_i32m1x3(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vluxseg3ei16_v_i32m1x3(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei16_v_i32m2x3(const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vluxseg3ei16_v_i32m2x3(const int32_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei16_v_i64m1x3(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vluxseg3ei16_v_i64m1x3(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei16_v_i64m2x3(const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vluxseg3ei16_v_i64m2x3(const int64_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
3) @test_vluxseg3ei16_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3(const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3(const uint8_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t 
test_vluxseg3ei16_v_u16mf2x3(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3(const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3(const uint16_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3(const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3(const uint32_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3(const uint64_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m1x3_m 
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t 
test_vluxseg3ei16_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t 
test_vluxseg3ei16_v_u16m1x3_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei32.c index 89e690a20be3f9..6cd1df5ce3eafa 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3(const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3(const _Float16 *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
3) @test_vluxseg3ei32_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3(const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3(const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3(const float *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t 
test_vluxseg3ei32_v_f64m1x3(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3(const double *base, vuint32m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3(const double *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei32_v_i8m1x3(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei32_v_i8m1x3(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei32_v_i8m2x3(const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vluxseg3ei32_v_i8m2x3(const int8_t *base, vuint32m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei32_v_i16m1x3(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vluxseg3ei32_v_i16m1x3(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei32_v_i16m2x3(const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vluxseg3ei32_v_i16m2x3(const int16_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei32_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei32_v_i32m1x3(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vluxseg3ei32_v_i32m1x3(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei32_v_i32m2x3(const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vluxseg3ei32_v_i32m2x3(const int32_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei32_v_i64m1x3(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vluxseg3ei32_v_i64m1x3(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t 
test_vluxseg3ei32_v_i64m2x3(const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vluxseg3ei32_v_i64m2x3(const int64_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3(const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3(const uint8_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3(const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3(const uint16_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3(const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3(const uint32_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3(const uint64_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], 
i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32mf2x3_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t 
test_vluxseg3ei32_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_m(vbool4_t mask, 
const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 
5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei64.c index 073a83536d33e7..1118ad4c09eaec 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3(const _Float16 *base, 
vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3(const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3(const _Float16 *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3(const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3(const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3(const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3(const float *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3(const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3(const double *base, vuint64m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3(const double *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei64_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei64_v_i8m1x3(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei64_v_i8m1x3(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t 
test_vluxseg3ei64_v_i16m1x3(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x3_t test_vluxseg3ei64_v_i16m1x3(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei64_v_i16m2x3(const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x3_t test_vluxseg3ei64_v_i16m2x3(const int16_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei64_v_i32m1x3(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32m1x3_t test_vluxseg3ei64_v_i32m1x3(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei64_v_i32m2x3(const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x3_t test_vluxseg3ei64_v_i32m2x3(const int32_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei64_v_i64m1x3(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x3_t test_vluxseg3ei64_v_i64m1x3(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei64_v_i64m2x3(const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x3_t test_vluxseg3ei64_v_i64m2x3(const int64_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei64_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3(const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3(const uint16_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t 
test_vluxseg3ei64_v_u32mf2x3(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3(const uint32_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3(const uint64_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_m(vbool16_t mask, const _Float16 *bas // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_m(vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_m(vbool64_t 
mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m1x3_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x3_t 
test_vluxseg3ei64_v_u16mf4x3_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei8.c index fe7d9c403a50f9..e28596034875f5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3(const _Float16 *base, 
vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3(const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3(const _Float16 *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3(const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3(const float *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3(const double *base, vuint8mf4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3(const double *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei8_v_i8m1x3(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei8_v_i8m1x3(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei8_v_i8m2x3(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vluxseg3ei8_v_i8m2x3(const int8_t *base, vuint8m2_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei8_v_i16m1x3(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vluxseg3ei8_v_i16m1x3(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei8_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei8_v_i16m2x3(const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vluxseg3ei8_v_i16m2x3(const int16_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei8_v_i32m1x3(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vluxseg3ei8_v_i32m1x3(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei8_v_i32m2x3(const int32_t 
*base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vluxseg3ei8_v_i32m2x3(const int32_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei8_v_i64m1x3(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vluxseg3ei8_v_i64m1x3(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei8_v_i64m2x3(const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vluxseg3ei8_v_i64m2x3(const int64_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3(const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3(const uint8_t *base, vuint8m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3(const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3(const uint16_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei8_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3(const uint32_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3(const 
uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3(const uint64_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_m(vbool16_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_m(vbool32_t mask, const int16_t 
*base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_m(vbool16_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_m(vbool32_t mask, const 
int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_m(vbool32_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], 
i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei16.c index 71a1f9b1580116..1ab0adef8484e1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4(const _Float16 *base, vuint16m1_t bindex, 
size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4(const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4(const _Float16 *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4(const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4(const float *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4(const double *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei16_v_i8m1x4(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei16_v_i8m1x4(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei16_v_i8m2x4(const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vluxseg4ei16_v_i8m2x4(const int8_t *base, vuint16m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei16_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei16_v_i16m1x4(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vluxseg4ei16_v_i16m1x4(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t 
test_vluxseg4ei16_v_i16m2x4(const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vluxseg4ei16_v_i16m2x4(const int16_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei16_v_i32m1x4(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vluxseg4ei16_v_i32m1x4(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei16_v_i32m2x4(const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vluxseg4ei16_v_i32m2x4(const int32_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei16_v_i64m1x4(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vluxseg4ei16_v_i64m1x4(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei16_v_i64m2x4(const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vluxseg4ei16_v_i64m2x4(const int64_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4(const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4(const uint8_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
4) @test_vluxseg4ei16_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4(const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4(const uint16_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t 
test_vluxseg4ei16_v_u32mf2x4(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4(const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4(const uint32_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4(const uint64_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_m(vbool16_t mask, const _Float16 *bas // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t 
test_vluxseg4ei16_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei16_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ 
-650,7 +650,7 @@ vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei32.c index 8b54262def489f..d5f59061e9b686 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4(const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4(const _Float16 *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4(const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4(const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4(const float *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4(const double *base, vuint32m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4(const double *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei32_v_i8m1x4(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei32_v_i8m1x4(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei32_v_i8m2x4(const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vluxseg4ei32_v_i8m2x4(const int8_t *base, vuint32m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t 
test_vluxseg4ei32_v_i16mf4x4(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei32_v_i16m1x4(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vluxseg4ei32_v_i16m1x4(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei32_v_i16m2x4(const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vluxseg4ei32_v_i16m2x4(const int16_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei32_v_i32m1x4(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vluxseg4ei32_v_i32m1x4(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei32_v_i32m2x4(const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vluxseg4ei32_v_i32m2x4(const int32_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei32_v_i64m1x4(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vluxseg4ei32_v_i64m1x4(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei32_v_i64m2x4(const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vluxseg4ei32_v_i64m2x4(const int64_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei32_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4(const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4(const uint8_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t 
test_vluxseg4ei32_v_u16mf2x4(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4(const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4(const uint16_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4(const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4(const uint32_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4(const uint64_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m1x4_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t 
test_vluxseg4ei32_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t 
test_vluxseg4ei32_v_u16m1x4_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei64.c index cfd67e337082ee..77cd350af20f16 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4(const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4(const _Float16 *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
4) @test_vluxseg4ei64_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4(const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4(const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4(const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4(const float *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t 
test_vluxseg4ei64_v_f64m1x4(const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4(const double *base, vuint64m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4(const double *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei64_v_i8m1x4(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei64_v_i8m1x4(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei64_v_i16m1x4(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x4_t test_vluxseg4ei64_v_i16m1x4(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei64_v_i16m2x4(const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x4_t test_vluxseg4ei64_v_i16m2x4(const int16_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei64_v_i32m1x4(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32m1x4_t test_vluxseg4ei64_v_i32m1x4(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei64_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei64_v_i32m2x4(const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x4_t test_vluxseg4ei64_v_i32m2x4(const int32_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei64_v_i64m1x4(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x4_t test_vluxseg4ei64_v_i64m1x4(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei64_v_i64m2x4(const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x4_t test_vluxseg4ei64_v_i64m2x4(const int64_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t 
test_vluxseg4ei64_v_u8mf8x4(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4(const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4(const uint16_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4(const uint32_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4(const uint64_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x4_t 
test_vluxseg4ei64_v_f32m1x4_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_m(vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_m(vbool64_t mask, 
const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 
4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]]
//
vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
@@ -700,7 +700,7 @@ vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_m(vbool64_t mask, const uint64_t *base
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m2x4_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]]
//
vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei8.c
index 6f3db167a77297..dd7381a4ffd0dc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei8.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf4x4
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]]
//
vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4(const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4(const _Float16 *base, vuint8mf8_t bi
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf2x4
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4)
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4(const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4(const _Float16 *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei8_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4(const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4(const float *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4(const double *base, 
vuint8mf4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4(const double *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei8_v_i8m1x4(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei8_v_i8m1x4(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei8_v_i8m2x4(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vluxseg4ei8_v_i8m2x4(const int8_t *base, vuint8m2_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei8_v_i16m1x4(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vluxseg4ei8_v_i16m1x4(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei8_v_i16m2x4(const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vluxseg4ei8_v_i16m2x4(const int16_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei8_v_i32m1x4(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vluxseg4ei8_v_i32m1x4(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei8_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei8_v_i32m2x4(const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vluxseg4ei8_v_i32m2x4(const int32_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei8_v_i64m1x4(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vluxseg4ei8_v_i64m1x4(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei8_v_i64m2x4(const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vluxseg4ei8_v_i64m2x4(const int64_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4(const uint8_t 
*base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4(const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4(const uint8_t *base, vuint8m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4(const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4(const uint16_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4(const uint32_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei8_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4(const uint64_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei8_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_m(vbool16_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t 
test_vluxseg4ei8_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: 
ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_m(vbool16_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_m(vbool32_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_m(vbool64_t mask, const 
uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_m(vbool64_t 
mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei16.c index 3361b70ab50553..508d900a36640d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16m1x5 
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5(const double *base, 
vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei16_v_i8m1x5(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei16_v_i8m1x5(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei16_v_i16m1x5(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei16_v_i16m1x5(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei16_v_i32m1x5(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei16_v_i32m1x5(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei16_v_i64m1x5(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei16_v_i64m1x5(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vluxseg5ei16_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t 
test_vluxseg5ei16_v_u16mf4x5(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vluxseg5ei16_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 
+360,7 @@ vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei32.c index 6b7dad071c151d..cd8bbc980cc8e8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei32_v_i8m1x5(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei32_v_i8m1x5(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t 
test_vluxseg5ei32_v_i16mf4x5(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei32_v_i16m1x5(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei32_v_i16m1x5(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei32_v_i32m1x5(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei32_v_i32m1x5(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei32_v_i64m1x5(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei32_v_i64m1x5(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vluxseg5ei32_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t 
test_vluxseg5ei32_v_u64m1x5(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", 
, 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vluxseg5ei32_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ 
-440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 
5)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]]
 //
 vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -510,7 +510,7 @@ vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_m(vbool64_t mask, const uint32_t *ba
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32m1x5_m
 // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]]
 //
 vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
@@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_m(vbool32_t mask, const uint32_t *base
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u64m1x5_m
 // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]]
 //
 vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei64.c
index 26fef0384cd7a0..709eb1bd9c01e0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei64.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf4x5
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]]
 //
 vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5(const _Float16 *base,
vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei64_v_i8m1x5(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei64_v_i8m1x5(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vluxseg5ei64_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei64_v_i16m1x5(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei64_v_i16m1x5(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei64_v_i32m1x5(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei64_v_i32m1x5(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t 
test_vluxseg5ei64_v_i64m1x5(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei64_v_i64m1x5(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t 
test_vluxseg5ei64_v_f16mf4x5_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_m(vbool32_t mask, 
const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) 
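Across these updated checks the trailing immediate is no longer a fixed i64 0; it now tracks the width of the data elements addressed by the pointer argument: 3 for 8-bit data, 4 for 16-bit, 5 for 32-bit, and 6 for 64-bit, i.e. log2 of the element width in bits. Only that final immediate differs between the '-' and '+' lines; the i64 3 that the masked forms carry before it is untouched by this change. A minimal standalone C sketch of the mapping (illustrative only; the helper name is hypothetical and not part of this patch or of Clang):

#include <assert.h>
#include <stdint.h>

/* Hypothetical helper, for illustration only: computes the log2(SEW)
   immediate that the rewritten CHECK lines expect for a given data
   element width in bits (8 -> 3, 16 -> 4, 32 -> 5, 64 -> 6). */
static uint64_t sew_operand_from_element_bits(unsigned element_bits) {
  uint64_t log2sew = 0;
  while (element_bits > 1) {
    element_bits >>= 1;
    ++log2sew;
  }
  return log2sew;
}

int main(void) {
  assert(sew_operand_from_element_bits(8) == 3);  /* i8/u8 segment tuples    */
  assert(sew_operand_from_element_bits(16) == 4); /* _Float16/i16/u16 tuples */
  assert(sew_operand_from_element_bits(32) == 5); /* float/i32/u32 tuples    */
  assert(sew_operand_from_element_bits(64) == 6); /* double/i64/u64 tuples   */
  return 0;
}
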
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei8.c index 6193e9f164eaaa..177eb292a2d150 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5(const _Float16 *base, 
vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei8_v_i8m1x5(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei8_v_i8m1x5(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei8_v_i16m1x5(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei8_v_i16m1x5(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei8_v_i32m1x5(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei8_v_i32m1x5(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei8_v_i64m1x5(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei8_v_i64m1x5(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t 
test_vluxseg5ei8_v_u16mf4x5(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16m1x5_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t 
test_vluxseg5ei8_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: 
ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei16.c index edbbae46d6863f..3ef4d1861bc572 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei16_v_i8m1x6(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei16_v_i8m1x6(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t 
test_vluxseg6ei16_v_i16mf4x6(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei16_v_i16m1x6(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei16_v_i16m1x6(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei16_v_i32m1x6(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei16_v_i32m1x6(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei16_v_i64m1x6(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei16_v_i64m1x6(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", 
, 6) @test_vluxseg6ei16_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t 
test_vluxseg6ei16_v_u64m1x6(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_m(vbool8_t mask, const 
int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei32.c index a3461cab1abe99..ada8dd83c3b4a8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei32_v_i8m1x6(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei32_v_i8m1x6(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t 
test_vluxseg6ei32_v_i16mf2x6(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei32_v_i16m1x6(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei32_v_i16m1x6(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei32_v_i32m1x6(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei32_v_i32m1x6(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei32_v_i64m1x6(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei32_v_i64m1x6(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_m(vbool64_t mask, const 
_Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf2x6_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t 
test_vluxseg6ei32_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 
5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei64.c index ded2b2562dc056..818869781f9631 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6(const 
_Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei64_v_i8m1x6(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei64_v_i8m1x6(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei64_v_i16m1x6(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei64_v_i16m1x6(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei64_v_i32m1x6(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei64_v_i32m1x6(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei64_v_i64m1x6(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei64_v_i64m1x6(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6(const uint8_t *base, vuint64m1_t binde // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, 
vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8m1x6_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei8.c index 99f137c6b0c734..03d28a71fefbf1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
6) @test_vluxseg6ei8_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei8_v_i8m1x6(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei8_v_i8m1x6(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6(const int16_t *base, 
vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei8_v_i16m1x6(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei8_v_i16m1x6(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei8_v_i32m1x6(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei8_v_i32m1x6(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei8_v_i64m1x6(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei8_v_i64m1x6(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vluxseg6ei8_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6(const 
uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], 
i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_m(vbool64_t mask, const uint8_t 
*base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_m(vbool64_t mask, const 
uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei16.c index 5190eb108b7860..a438a16182d9f0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) 
[[TMP0]] // vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei16_v_i8m1x7(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei16_v_i8m1x7(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei16_v_i16m1x7(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei16_v_i16m1x7(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei16_v_i32m1x7(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei16_v_i32m1x7(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei16_v_i64m1x7(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei16_v_i64m1x7(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // 
vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_m(vbool32_t mask, 
const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t 
test_vluxseg7ei16_v_u32m1x7_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei32.c index c475d6345f0106..9e43c1ac1f6e41 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t 
test_vluxseg7ei32_v_f64m1x7(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei32_v_i8m1x7(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei32_v_i8m1x7(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei32_v_i16m1x7(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei32_v_i16m1x7(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei32_v_i32m1x7(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei32_v_i32m1x7(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei32_v_i64m1x7(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei32_v_i64m1x7(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7(const 
uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vluxseg7ei32_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ 
-430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei64.c index baa995b7b1de6f..edf76fa836f7fb 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei64_v_i8m1x7(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei64_v_i8m1x7(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei64_v_i16m1x7(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei64_v_i16m1x7(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // 
vint32m1x7_t test_vluxseg7ei64_v_i32m1x7(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei64_v_i32m1x7(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei64_v_i64m1x7(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei64_v_i64m1x7(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_m(vbool64_t mask, const 
int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf4x7_m // CHECK-RV64-SAME: 
( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ 
vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei8.c index 14acaf8c5d5f07..bf354add311655 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vluxseg7ei8_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7(const float 
*base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei8_v_i8m1x7(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei8_v_i8m1x7(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei8_v_i16m1x7(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei8_v_i16m1x7(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei8_v_i32m1x7(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei8_v_i32m1x7(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei8_v_i64m1x7(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei8_v_i64m1x7(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vluxseg7ei8_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7(const uint8_t *base, 
vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, 
vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_m(vbool32_t mask, const int32_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: 
ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei16.c index 418a5dffca665a..1c8dd615e0240f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei16_v_i8m1x8(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei16_v_i8m1x8(const int8_t *base, vuint16m2_t 
bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei16_v_i16m1x8(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei16_v_i16m1x8(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei16_v_i32m1x8(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei16_v_i32m1x8(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei16_v_i64m1x8(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei16_v_i64m1x8(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8(const uint32_t *base, 
vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { 
@@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t 
test_vluxseg8ei16_v_u16m1x8_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei32.c index 9122a0cfb4ecef..cef6b7bf7ddece 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei32.c @@ -10,7 +10,7 @@ // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei32_v_i8m1x8(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei32_v_i8m1x8(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei32_v_i16m1x8(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei32_v_i16m1x8(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei32_v_i32m1x8(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei32_v_i32m1x8(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei32_v_i64m1x8(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei32_v_i64m1x8(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) 
[[TMP0]] // vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_m(vbool64_t 
mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t 
test_vluxseg8ei32_v_u32mf2x8_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei64.c index de96b1d64c6524..0625d88ad1bdca 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf2x8 
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8(const float *base, 
vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei64_v_i8m1x8(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei64_v_i8m1x8(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei64_v_i16m1x8(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei64_v_i16m1x8(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei64_v_i32m1x8(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei64_v_i32m1x8(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei64_v_i64m1x8(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei64_v_i64m1x8(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vluxseg8ei64_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t 
test_vluxseg8ei64_v_u8m1x8(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_m(vbool32_t mask, 
const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", 
, 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t 
test_vluxseg8ei64_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", 
, 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vluxseg8ei64_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei8.c index 713697603e0392..a00686ea71cabc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vluxseg8ei8_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei8_v_i8m1x8(const int8_t *base, vuint8m1_t 
bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei8_v_i8m1x8(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei8_v_i16m1x8(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei8_v_i16m1x8(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei8_v_i32m1x8(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei8_v_i32m1x8(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei8_v_i64m1x8(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei8_v_i64m1x8(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vluxseg8ei8_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8(const 
uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_m(vbool64_t mask, const int64_t 
*base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_m(vbool16_t mask, const 
uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei16.c index 115ad00463fe51..5ca25c2e5bd164 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei16.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16mf4x2(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg2ei16_v_f16mf4x2(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16mf2x2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg2ei16_v_f16mf2x2(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16m1x2(_Float16 *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg2ei16_v_f16m1x2(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16m2x2(_Float16 *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg2ei16_v_f16m2x2(_Float16 *base, vuint16m2_t bindex, 
vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16m4x2(_Float16 *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg2ei16_v_f16m4x2(_Float16 *base, vuint16m4_t bindex, vfloat16m4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32mf2x2(float *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg2ei16_v_f32mf2x2(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m1x2(float *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg2ei16_v_f32m1x2(float *base, vuint16mf2_t bindex, vfloat32m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m2x2(float *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg2ei16_v_f32m2x2(float *base, vuint16m1_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m4x2(float *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg2ei16_v_f32m4x2(float *base, vuint16m2_t bindex, vfloat32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m1x2(double *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg2ei16_v_f64m1x2(double *base, vuint16mf4_t bindex, vfloat64m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m2x2(double *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg2ei16_v_f64m2x2(double *base, vuint16mf2_t bindex, vfloat64m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m4x2(double *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg2ei16_v_f64m4x2(double *base, vuint16m1_t bindex, vfloat64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8mf8x2(int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg2ei16_v_i8mf8x2(int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8mf4x2(int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg2ei16_v_i8mf4x2(int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8mf2x2(int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg2ei16_v_i8mf2x2(int8_t *base, vuint16m1_t bindex, vint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m1x2(int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg2ei16_v_i8m1x2(int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m2x2(int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg2ei16_v_i8m2x2(int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m4x2(int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg2ei16_v_i8m4x2(int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16mf4x2(int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg2ei16_v_i16mf4x2(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16mf2x2(int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg2ei16_v_i16mf2x2(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m1x2(int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg2ei16_v_i16m1x2(int16_t *base, vuint16m1_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m2x2(int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg2ei16_v_i16m2x2(int16_t *base, vuint16m2_t bindex, vint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m4x2(int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg2ei16_v_i16m4x2(int16_t *base, vuint16m4_t bindex, vint16m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32mf2x2(int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg2ei16_v_i32mf2x2(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m1x2(int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg2ei16_v_i32m1x2(int32_t *base, vuint16mf2_t bindex, vint32m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m2x2(int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg2ei16_v_i32m2x2(int32_t *base, vuint16m1_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m4x2(int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg2ei16_v_i32m4x2(int32_t *base, vuint16m2_t bindex, vint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i64m1x2(int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg2ei16_v_i64m1x2(int64_t *base, vuint16mf4_t bindex, vint64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i64m2x2(int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg2ei16_v_i64m2x2(int64_t *base, vuint16mf2_t bindex, vint64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i64m4x2(int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg2ei16_v_i64m4x2(int64_t *base, vuint16m1_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf8x2(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg2ei16_v_u8mf8x2(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf4x2(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg2ei16_v_u8mf4x2(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf2x2(uint8_t 
*base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg2ei16_v_u8mf2x2(uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8m1x2(uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg2ei16_v_u8m1x2(uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8m2x2(uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg2ei16_v_u8m2x2(uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8m4x2(uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg2ei16_v_u8m4x2(uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16mf4x2(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void 
test_vsoxseg2ei16_v_u16mf4x2(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16mf2x2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg2ei16_v_u16mf2x2(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16m1x2(uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg2ei16_v_u16m1x2(uint16_t *base, vuint16m1_t bindex, vuint16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16m2x2(uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg2ei16_v_u16m2x2(uint16_t *base, vuint16m2_t bindex, vuint16m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16m4x2(uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg2ei16_v_u16m4x2(uint16_t *base, vuint16m4_t bindex, vuint16m4x2 // CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg2ei16_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32mf2x2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg2ei16_v_u32mf2x2(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32m1x2(uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg2ei16_v_u32m1x2(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32m2x2(uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg2ei16_v_u32m2x2(uint32_t *base, vuint16m1_t bindex, vuint32m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32m4x2(uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg2ei16_v_u32m4x2(uint32_t *base, vuint16m2_t bindex, vuint32m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u64m1x2(uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg2ei16_v_u64m1x2(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u64m2x2(uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg2ei16_v_u64m2x2(uint64_t *base, vuint16mf2_t bindex, vuint64m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u64m4x2(uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg2ei16_v_u64m4x2(uint64_t *base, vuint16m1_t bindex, vuint64m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg2ei16_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg2ei16_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg2ei16_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg2ei16_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ 
void test_vsoxseg2ei16_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint16m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32mf2x2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg2ei16_v_f32mf2x2_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m1x2_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg2ei16_v_f32m1x2_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m2x2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg2ei16_v_f32m2x2_m(vbool16_t mask, float *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m4x2_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg2ei16_v_f32m4x2_m(vbool8_t mask, float *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m1x2_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg2ei16_v_f64m1x2_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m2x2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg2ei16_v_f64m2x2_m(vbool32_t mask, double *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m4x2_m(vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg2ei16_v_f64m4x2_m(vbool16_t mask, double *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg2ei16_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg2ei16_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg2ei16_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg2ei16_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg2ei16_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint16m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg2ei16_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint16m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg2ei16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg2ei16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg2ei16_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg2ei16_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg2ei16_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg2ei16_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint16m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32mf2x2_m(vbool64_t mask, int32_t 
*base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg2ei16_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg2ei16_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsoxseg2ei16_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsoxseg2ei16_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsoxseg2ei16_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsoxseg2ei16_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsoxseg2ei16_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsoxseg2ei16_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsoxseg2ei16_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsoxseg2ei16_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -830,7 +830,7 @@ void test_vsoxseg2ei16_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -840,7 +840,7 @@ void test_vsoxseg2ei16_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m4x2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -850,7 +850,7 @@ void test_vsoxseg2ei16_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint16m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -860,7 +860,7 @@ void test_vsoxseg2ei16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -870,7 +870,7 @@ void test_vsoxseg2ei16_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t 
v_tuple, size_t vl) { @@ -880,7 +880,7 @@ void test_vsoxseg2ei16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -890,7 +890,7 @@ void test_vsoxseg2ei16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -900,7 +900,7 @@ void test_vsoxseg2ei16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint16m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -910,7 +910,7 @@ void test_vsoxseg2ei16_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -920,7 +920,7 @@ void test_vsoxseg2ei16_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -930,7 +930,7 @@ void test_vsoxseg2ei16_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -940,7 +940,7 @@ void test_vsoxseg2ei16_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -950,7 +950,7 @@ void test_vsoxseg2ei16_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -960,7 +960,7 @@ void test_vsoxseg2ei16_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei32.c index ad3000ce5851f7..c1493f0ebac5cc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16mf4x2(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg2ei32_v_f16mf4x2(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16mf2x2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t 
v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg2ei32_v_f16mf2x2(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16m1x2(_Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg2ei32_v_f16m1x2(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16m2x2(_Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg2ei32_v_f16m2x2(_Float16 *base, vuint32m4_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16m4x2(_Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg2ei32_v_f16m4x2(_Float16 *base, vuint32m8_t bindex, vfloat16m4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32mf2x2(float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg2ei32_v_f32mf2x2(float *base, vuint32mf2_t bindex, 
vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m1x2(float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg2ei32_v_f32m1x2(float *base, vuint32m1_t bindex, vfloat32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m2x2(float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg2ei32_v_f32m2x2(float *base, vuint32m2_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m4x2(float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg2ei32_v_f32m4x2(float *base, vuint32m4_t bindex, vfloat32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f64m1x2(double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg2ei32_v_f64m1x2(double *base, vuint32mf2_t bindex, vfloat64m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f64m2x2(double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg2ei32_v_f64m2x2(double *base, vuint32m1_t bindex, vfloat64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f64m4x2(double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg2ei32_v_f64m4x2(double *base, vuint32m2_t bindex, vfloat64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf8x2(int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg2ei32_v_i8mf8x2(int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf4x2(int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg2ei32_v_i8mf4x2(int8_t *base, vuint32m1_t bindex, vint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf2x2(int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg2ei32_v_i8mf2x2(int8_t *base, vuint32m2_t bindex, vint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8m1x2(int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg2ei32_v_i8m1x2(int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8m2x2(int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg2ei32_v_i8m2x2(int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16mf4x2(int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg2ei32_v_i16mf4x2(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16mf2x2(int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg2ei32_v_i16mf2x2(int16_t *base, vuint32m1_t bindex, vint16mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m1x2(int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg2ei32_v_i16m1x2(int16_t *base, vuint32m2_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m2x2(int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg2ei32_v_i16m2x2(int16_t *base, vuint32m4_t bindex, vint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m4x2(int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg2ei32_v_i16m4x2(int16_t *base, vuint32m8_t bindex, vint16m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32mf2x2(int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg2ei32_v_i32mf2x2(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32m1x2(int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg2ei32_v_i32m1x2(int32_t *base, vuint32m1_t bindex, vint32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32m2x2(int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg2ei32_v_i32m2x2(int32_t *base, vuint32m2_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32m4x2(int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg2ei32_v_i32m4x2(int32_t *base, vuint32m4_t bindex, vint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m1x2(int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg2ei32_v_i64m1x2(int64_t *base, vuint32mf2_t bindex, vint64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m2x2(int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg2ei32_v_i64m2x2(int64_t *base, vuint32m1_t bindex, vint64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m4x2(int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg2ei32_v_i64m4x2(int64_t *base, vuint32m2_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf8x2(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg2ei32_v_u8mf8x2(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf4x2(uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg2ei32_v_u8mf4x2(uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf2x2(uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg2ei32_v_u8mf2x2(uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8m1x2(uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg2ei32_v_u8m1x2(uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8m2x2(uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg2ei32_v_u8m2x2(uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16mf4x2(uint16_t 
*base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg2ei32_v_u16mf4x2(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16mf2x2(uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg2ei32_v_u16mf2x2(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16m1x2(uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg2ei32_v_u16m1x2(uint16_t *base, vuint32m2_t bindex, vuint16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16m2x2(uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg2ei32_v_u16m2x2(uint16_t *base, vuint32m4_t bindex, vuint16m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16m4x2(uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void 
test_vsoxseg2ei32_v_u16m4x2(uint16_t *base, vuint32m8_t bindex, vuint16m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32mf2x2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg2ei32_v_u32mf2x2(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m1x2(uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg2ei32_v_u32m1x2(uint32_t *base, vuint32m1_t bindex, vuint32m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m2x2(uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg2ei32_v_u32m2x2(uint32_t *base, vuint32m2_t bindex, vuint32m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m4x2(uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg2ei32_v_u32m4x2(uint32_t *base, vuint32m4_t bindex, vuint32m4x2 // CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg2ei32_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u64m1x2(uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg2ei32_v_u64m1x2(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u64m2x2(uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg2ei32_v_u64m2x2(uint64_t *base, vuint32m1_t bindex, vuint64m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u64m4x2(uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg2ei32_v_u64m4x2(uint64_t *base, vuint32m2_t bindex, vuint64m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg2ei32_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg2ei32_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg2ei32_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg2ei32_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg2ei32_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16m4x2_m(vbool4_t mask, 
_Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg2ei32_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint32m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32mf2x2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg2ei32_v_f32mf2x2_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m1x2_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg2ei32_v_f32m1x2_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m2x2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg2ei32_v_f32m2x2_m(vbool16_t mask, float *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m4x2_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg2ei32_v_f32m4x2_m(vbool8_t mask, float *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f64m1x2_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg2ei32_v_f64m1x2_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f64m2x2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg2ei32_v_f64m2x2_m(vbool32_t mask, double *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f64m4x2_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg2ei32_v_f64m4x2_m(vbool16_t mask, double *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg2ei32_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg2ei32_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg2ei32_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg2ei32_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg2ei32_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint32m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg2ei32_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg2ei32_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -670,7 
+670,7 @@ void test_vsoxseg2ei32_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg2ei32_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg2ei32_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint32m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg2ei32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg2ei32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg2ei32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg2ei32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg2ei32_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsoxseg2ei32_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsoxseg2ei32_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsoxseg2ei32_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsoxseg2ei32_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsoxseg2ei32_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsoxseg2ei32_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsoxseg2ei32_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsoxseg2ei32_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: 
define dso_local void @test_vsoxseg2ei32_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -830,7 +830,7 @@ void test_vsoxseg2ei32_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -840,7 +840,7 @@ void test_vsoxseg2ei32_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -850,7 +850,7 @@ void test_vsoxseg2ei32_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg2ei32_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -860,7 +860,7 @@ void test_vsoxseg2ei32_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint32m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -870,7 +870,7 @@ void test_vsoxseg2ei32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -880,7 +880,7 @@ void test_vsoxseg2ei32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -890,7 +890,7 @@ void test_vsoxseg2ei32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -900,7 +900,7 @@ void test_vsoxseg2ei32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -910,7 +910,7 @@ void test_vsoxseg2ei32_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -920,7 +920,7 @@ void test_vsoxseg2ei32_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei64.c index 065a9e21701538..f378c2a2fd2300 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei64.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16mf4x2(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg2ei64_v_f16mf4x2(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16mf2x2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg2ei64_v_f16mf2x2(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16m1x2(_Float16 *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg2ei64_v_f16m1x2(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16m2x2(_Float16 *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg2ei64_v_f16m2x2(_Float16 *base, vuint64m8_t bindex, 
vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32mf2x2(float *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg2ei64_v_f32mf2x2(float *base, vuint64m1_t bindex, vfloat32mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m1x2(float *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg2ei64_v_f32m1x2(float *base, vuint64m2_t bindex, vfloat32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m2x2(float *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg2ei64_v_f32m2x2(float *base, vuint64m4_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m4x2(float *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg2ei64_v_f32m4x2(float *base, vuint64m8_t bindex, vfloat32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m1x2(double *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg2ei64_v_f64m1x2(double *base, vuint64m1_t bindex, vfloat64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m2x2(double *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg2ei64_v_f64m2x2(double *base, vuint64m2_t bindex, vfloat64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m4x2(double *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg2ei64_v_f64m4x2(double *base, vuint64m4_t bindex, vfloat64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf8x2(int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg2ei64_v_i8mf8x2(int8_t *base, vuint64m1_t bindex, vint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf4x2(int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg2ei64_v_i8mf4x2(int8_t *base, vuint64m2_t bindex, vint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf2x2(int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg2ei64_v_i8mf2x2(int8_t *base, vuint64m4_t bindex, vint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8m1x2(int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg2ei64_v_i8m1x2(int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16mf4x2(int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg2ei64_v_i16mf4x2(int16_t *base, vuint64m1_t bindex, vint16mf4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16mf2x2(int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg2ei64_v_i16mf2x2(int16_t *base, vuint64m2_t bindex, vint16mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16m1x2(int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg2ei64_v_i16m1x2(int16_t *base, vuint64m4_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16m2x2(int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg2ei64_v_i16m2x2(int16_t *base, vuint64m8_t bindex, vint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32mf2x2(int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg2ei64_v_i32mf2x2(int32_t *base, vuint64m1_t bindex, vint32mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32m1x2(int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg2ei64_v_i32m1x2(int32_t *base, vuint64m2_t bindex, vint32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32m2x2(int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg2ei64_v_i32m2x2(int32_t *base, vuint64m4_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32m4x2(int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg2ei64_v_i32m4x2(int32_t *base, vuint64m8_t bindex, vint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m1x2(int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg2ei64_v_i64m1x2(int64_t *base, vuint64m1_t bindex, vint64m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m2x2(int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg2ei64_v_i64m2x2(int64_t *base, vuint64m2_t bindex, vint64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m4x2(int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg2ei64_v_i64m4x2(int64_t *base, vuint64m4_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf8x2(uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg2ei64_v_u8mf8x2(uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf4x2(uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg2ei64_v_u8mf4x2(uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf2x2(uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg2ei64_v_u8mf2x2(uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8m1x2(uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg2ei64_v_u8m1x2(uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16mf4x2(uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg2ei64_v_u16mf4x2(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16mf2x2(uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg2ei64_v_u16mf2x2(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16m1x2(uint16_t 
*base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg2ei64_v_u16m1x2(uint16_t *base, vuint64m4_t bindex, vuint16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16m2x2(uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg2ei64_v_u16m2x2(uint16_t *base, vuint64m8_t bindex, vuint16m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32mf2x2(uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg2ei64_v_u32mf2x2(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32m1x2(uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg2ei64_v_u32m1x2(uint32_t *base, vuint64m2_t bindex, vuint32m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32m2x2(uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void 
test_vsoxseg2ei64_v_u32m2x2(uint32_t *base, vuint64m4_t bindex, vuint32m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32m4x2(uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg2ei64_v_u32m4x2(uint32_t *base, vuint64m8_t bindex, vuint32m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u64m1x2(uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg2ei64_v_u64m1x2(uint64_t *base, vuint64m1_t bindex, vuint64m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u64m2x2(uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg2ei64_v_u64m2x2(uint64_t *base, vuint64m2_t bindex, vuint64m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u64m4x2(uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg2ei64_v_u64m4x2(uint64_t *base, vuint64m4_t bindex, vuint64m4x2 // CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg2ei64_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg2ei64_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg2ei64_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg2ei64_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg2ei64_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg2ei64_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32mf2x2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg2ei64_v_f32mf2x2_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m1x2_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg2ei64_v_f32m1x2_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m2x2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg2ei64_v_f32m2x2_m(vbool16_t mask, float *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m4x2_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg2ei64_v_f32m4x2_m(vbool8_t mask, float *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m1x2_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg2ei64_v_f64m1x2_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m2x2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg2ei64_v_f64m2x2_m(vbool32_t mask, double *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m4x2_m(vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg2ei64_v_f64m4x2_m(vbool16_t mask, double *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg2ei64_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg2ei64_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg2ei64_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg2ei64_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg2ei64_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg2ei64_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg2ei64_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -610,7 
+610,7 @@ void test_vsoxseg2ei64_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg2ei64_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg2ei64_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg2ei64_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg2ei64_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg2ei64_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg2ei64_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg2ei64_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg2ei64_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg2ei64_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg2ei64_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg2ei64_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg2ei64_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg2ei64_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsoxseg2ei64_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsoxseg2ei64_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local 
void @test_vsoxseg2ei64_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsoxseg2ei64_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsoxseg2ei64_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsoxseg2ei64_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32m4x2_m(vbool8_t mask, 
uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsoxseg2ei64_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsoxseg2ei64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsoxseg2ei64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei8.c index 062943ec7a35b3..d0229ef3ed21fb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16mf4x2(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg2ei8_v_f16mf4x2(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16mf2x2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg2ei8_v_f16mf2x2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m1x2(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg2ei8_v_f16m1x2(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m2x2(_Float16 *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg2ei8_v_f16m2x2(_Float16 *base, vuint8m1_t bindex, vfloat16m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m4x2(_Float16 *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg2ei8_v_f16m4x2(_Float16 *base, vuint8m2_t bindex, vfloat16m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32mf2x2(float *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg2ei8_v_f32mf2x2(float *base, vuint8mf8_t bindex, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32m1x2(float *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg2ei8_v_f32m1x2(float *base, vuint8mf4_t bindex, vfloat32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32m2x2(float *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg2ei8_v_f32m2x2(float *base, vuint8mf2_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32m4x2(float *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg2ei8_v_f32m4x2(float *base, vuint8m1_t bindex, vfloat32m4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m1x2(double *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg2ei8_v_f64m1x2(double *base, vuint8mf8_t bindex, vfloat64m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m2x2(double *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg2ei8_v_f64m2x2(double *base, vuint8mf4_t bindex, vfloat64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m4x2(double *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg2ei8_v_f64m4x2(double *base, vuint8mf2_t bindex, vfloat64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf8x2(int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg2ei8_v_i8mf8x2(int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf4x2(int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg2ei8_v_i8mf4x2(int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf2x2(int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg2ei8_v_i8mf2x2(int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8m1x2(int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg2ei8_v_i8m1x2(int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8m2x2(int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg2ei8_v_i8m2x2(int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8m4x2(int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg2ei8_v_i8m4x2(int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16mf4x2(int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg2ei8_v_i16mf4x2(int16_t *base, vuint8mf8_t bindex, vint16mf4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16mf2x2(int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg2ei8_v_i16mf2x2(int16_t *base, vuint8mf4_t bindex, vint16mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16m1x2(int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, 
size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg2ei8_v_i16m1x2(int16_t *base, vuint8mf2_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16m2x2(int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg2ei8_v_i16m2x2(int16_t *base, vuint8m1_t bindex, vint16m2x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16m4x2(int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg2ei8_v_i16m4x2(int16_t *base, vuint8m2_t bindex, vint16m4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32mf2x2(int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg2ei8_v_i32mf2x2(int32_t *base, vuint8mf8_t bindex, vint32mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m1x2(int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg2ei8_v_i32m1x2(int32_t *base, vuint8mf4_t bindex, vint32m1x2_t // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m2x2(int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg2ei8_v_i32m2x2(int32_t *base, vuint8mf2_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m4x2(int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg2ei8_v_i32m4x2(int32_t *base, vuint8m1_t bindex, vint32m4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m1x2(int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg2ei8_v_i64m1x2(int64_t *base, vuint8mf8_t bindex, vint64m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m2x2(int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg2ei8_v_i64m2x2(int64_t *base, vuint8mf4_t bindex, vint64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m4x2(int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg2ei8_v_i64m4x2(int64_t *base, vuint8mf2_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf8x2(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg2ei8_v_u8mf8x2(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf4x2(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg2ei8_v_u8mf4x2(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf2x2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg2ei8_v_u8mf2x2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m1x2(uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg2ei8_v_u8m1x2(uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m2x2(uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg2ei8_v_u8m2x2(uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m4x2(uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg2ei8_v_u8m4x2(uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16mf4x2(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg2ei8_v_u16mf4x2(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16mf2x2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg2ei8_v_u16mf2x2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m1x2(uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg2ei8_v_u16m1x2(uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m2x2(uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg2ei8_v_u16m2x2(uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m4x2(uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg2ei8_v_u16m4x2(uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32mf2x2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg2ei8_v_u32mf2x2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m1x2(uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg2ei8_v_u32m1x2(uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m2x2(uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg2ei8_v_u32m2x2(uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m4x2(uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg2ei8_v_u32m4x2(uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u64m1x2(uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg2ei8_v_u64m1x2(uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u64m2x2(uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg2ei8_v_u64m2x2(uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u64m4x2(uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg2ei8_v_u64m4x2(uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg2ei8_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg2ei8_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg2ei8_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg2ei8_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg2ei8_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint8m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32mf2x2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg2ei8_v_f32mf2x2_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32m1x2_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg2ei8_v_f32m1x2_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32m2x2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg2ei8_v_f32m2x2_m(vbool16_t mask, float *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32m4x2_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg2ei8_v_f32m4x2_m(vbool8_t mask, float *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m1x2_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg2ei8_v_f64m1x2_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m2x2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg2ei8_v_f64m2x2_m(vbool32_t mask, double *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m4x2_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg2ei8_v_f64m4x2_m(vbool16_t mask, double *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg2ei8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg2ei8_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg2ei8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg2ei8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg2ei8_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, 
vint8m2x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg2ei8_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg2ei8_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg2ei8_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg2ei8_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg2ei8_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg2ei8_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg2ei8_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint8m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg2ei8_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg2ei8_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsoxseg2ei8_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsoxseg2ei8_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsoxseg2ei8_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsoxseg2ei8_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsoxseg2ei8_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsoxseg2ei8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsoxseg2ei8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg2ei8_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsoxseg2ei8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -830,7 +830,7 @@ void test_vsoxseg2ei8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -840,7 +840,7 @@ void test_vsoxseg2ei8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, 
vuint8m4x2_t v_tuple, size_t vl) { @@ -850,7 +850,7 @@ void test_vsoxseg2ei8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -860,7 +860,7 @@ void test_vsoxseg2ei8_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -870,7 +870,7 @@ void test_vsoxseg2ei8_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -880,7 +880,7 @@ void test_vsoxseg2ei8_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -890,7 +890,7 @@ void test_vsoxseg2ei8_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -900,7 +900,7 @@ void test_vsoxseg2ei8_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint8m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -910,7 +910,7 @@ void test_vsoxseg2ei8_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -920,7 +920,7 @@ void test_vsoxseg2ei8_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -930,7 +930,7 @@ void test_vsoxseg2ei8_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -940,7 +940,7 @@ void test_vsoxseg2ei8_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -950,7 +950,7 @@ void test_vsoxseg2ei8_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -960,7 +960,7 @@ void test_vsoxseg2ei8_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m4x2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei16.c index 1b37047f28c1ae..56a1c21958fa6e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16mf4x3(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg3ei16_v_f16mf4x3(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16mf2x3(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg3ei16_v_f16mf2x3(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // 
CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16m1x3(_Float16 *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg3ei16_v_f16m1x3(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16m2x3(_Float16 *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg3ei16_v_f16m2x3(_Float16 *base, vuint16m2_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32mf2x3(float *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg3ei16_v_f32mf2x3(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32m1x3(float *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg3ei16_v_f32m1x3(float *base, vuint16mf2_t bindex, vfloat32m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32m2x3(float *base, vuint16m1_t bindex, vfloat32m2x3_t 
v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg3ei16_v_f32m2x3(float *base, vuint16m1_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f64m1x3(double *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg3ei16_v_f64m1x3(double *base, vuint16mf4_t bindex, vfloat64m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f64m2x3(double *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg3ei16_v_f64m2x3(double *base, vuint16mf2_t bindex, vfloat64m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8mf8x3(int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg3ei16_v_i8mf8x3(int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8mf4x3(int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg3ei16_v_i8mf4x3(int8_t *base, vuint16mf2_t bindex, 
vint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8mf2x3(int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg3ei16_v_i8mf2x3(int8_t *base, vuint16m1_t bindex, vint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8m1x3(int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg3ei16_v_i8m1x3(int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8m2x3(int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg3ei16_v_i8m2x3(int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16mf4x3(int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg3ei16_v_i16mf4x3(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16mf2x3(int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg3ei16_v_i16mf2x3(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16m1x3(int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg3ei16_v_i16m1x3(int16_t *base, vuint16m1_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16m2x3(int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg3ei16_v_i16m2x3(int16_t *base, vuint16m2_t bindex, vint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32mf2x3(int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg3ei16_v_i32mf2x3(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32m1x3(int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg3ei16_v_i32m1x3(int32_t *base, vuint16mf2_t bindex, vint32m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32m2x3(int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg3ei16_v_i32m2x3(int32_t *base, vuint16m1_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i64m1x3(int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg3ei16_v_i64m1x3(int64_t *base, vuint16mf4_t bindex, vint64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i64m2x3(int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg3ei16_v_i64m2x3(int64_t *base, vuint16mf2_t bindex, vint64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8mf8x3(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg3ei16_v_u8mf8x3(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8mf4x3(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg3ei16_v_u8mf4x3(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8mf2x3(uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg3ei16_v_u8mf2x3(uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8m1x3(uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg3ei16_v_u8m1x3(uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8m2x3(uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg3ei16_v_u8m2x3(uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16mf4x3(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg3ei16_v_u16mf4x3(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16mf2x3(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg3ei16_v_u16mf2x3(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16m1x3(uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg3ei16_v_u16m1x3(uint16_t *base, vuint16m1_t bindex, vuint16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16m2x3(uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg3ei16_v_u16m2x3(uint16_t *base, vuint16m2_t bindex, vuint16m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u32mf2x3(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg3ei16_v_u32mf2x3(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u32m1x3(uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg3ei16_v_u32m1x3(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u32m2x3(uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg3ei16_v_u32m2x3(uint32_t *base, vuint16m1_t bindex, vuint32m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u64m1x3(uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg3ei16_v_u64m1x3(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u64m2x3(uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg3ei16_v_u64m2x3(uint64_t *base, vuint16mf2_t bindex, vuint64m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg3ei16_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg3ei16_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg3ei16_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg3ei16_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32mf2x3_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg3ei16_v_f32mf2x3_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32m1x3_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg3ei16_v_f32m1x3_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32m2x3_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg3ei16_v_f32m2x3_m(vbool16_t mask, float *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f64m1x3_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg3ei16_v_f64m1x3_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f64m2x3_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg3ei16_v_f64m2x3_m(vbool32_t mask, double *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg3ei16_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf4x3_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg3ei16_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg3ei16_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg3ei16_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ 
-520,7 +520,7 @@ void test_vsoxseg3ei16_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint16m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg3ei16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg3ei16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg3ei16_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg3ei16_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg3ei16_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg3ei16_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg3ei16_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg3ei16_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg3ei16_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg3ei16_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg3ei16_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg3ei16_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg3ei16_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg3ei16_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg3ei16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: 
define dso_local void @test_vsoxseg3ei16_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg3ei16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg3ei16_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg3ei16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg3ei16_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg3ei16_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg3ei16_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg3ei16_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg3ei16_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei32.c index a28fcd5801af62..8837a8ad83d411 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16mf4x3(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg3ei32_v_f16mf4x3(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16mf2x3(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg3ei32_v_f16mf2x3(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16m1x3(_Float16 *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg3ei32_v_f16m1x3(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) 
[[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16m2x3(_Float16 *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg3ei32_v_f16m2x3(_Float16 *base, vuint32m4_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f32mf2x3(float *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg3ei32_v_f32mf2x3(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f32m1x3(float *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg3ei32_v_f32m1x3(float *base, vuint32m1_t bindex, vfloat32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f32m2x3(float *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg3ei32_v_f32m2x3(float *base, vuint32m2_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f64m1x3(double *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg3ei32_v_f64m1x3(double *base, vuint32mf2_t bindex, vfloat64m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f64m2x3(double *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg3ei32_v_f64m2x3(double *base, vuint32m1_t bindex, vfloat64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf8x3(int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg3ei32_v_i8mf8x3(int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf4x3(int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg3ei32_v_i8mf4x3(int8_t *base, vuint32m1_t bindex, vint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf2x3(int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg3ei32_v_i8mf2x3(int8_t *base, vuint32m2_t bindex, vint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8m1x3(int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg3ei32_v_i8m1x3(int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8m2x3(int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg3ei32_v_i8m2x3(int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16mf4x3(int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg3ei32_v_i16mf4x3(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16mf2x3(int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg3ei32_v_i16mf2x3(int16_t *base, vuint32m1_t bindex, vint16mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16m1x3(int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg3ei32_v_i16m1x3(int16_t *base, vuint32m2_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16m2x3(int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg3ei32_v_i16m2x3(int16_t *base, vuint32m4_t bindex, vint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32mf2x3(int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg3ei32_v_i32mf2x3(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32m1x3(int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg3ei32_v_i32m1x3(int32_t *base, vuint32m1_t bindex, vint32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32m2x3(int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg3ei32_v_i32m2x3(int32_t *base, vuint32m2_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i64m1x3(int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg3ei32_v_i64m1x3(int64_t *base, vuint32mf2_t bindex, vint64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i64m2x3(int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg3ei32_v_i64m2x3(int64_t *base, vuint32m1_t bindex, vint64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf8x3(uint8_t 
*base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg3ei32_v_u8mf8x3(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf4x3(uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg3ei32_v_u8mf4x3(uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf2x3(uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg3ei32_v_u8mf2x3(uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8m1x3(uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg3ei32_v_u8m1x3(uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8m2x3(uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void 
test_vsoxseg3ei32_v_u8m2x3(uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16mf4x3(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg3ei32_v_u16mf4x3(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16mf2x3(uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg3ei32_v_u16mf2x3(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16m1x3(uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg3ei32_v_u16m1x3(uint16_t *base, vuint32m2_t bindex, vuint16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16m2x3(uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg3ei32_v_u16m2x3(uint16_t *base, vuint32m4_t bindex, vuint16m2x3 // CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg3ei32_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32mf2x3(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg3ei32_v_u32mf2x3(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32m1x3(uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg3ei32_v_u32m1x3(uint32_t *base, vuint32m1_t bindex, vuint32m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32m2x3(uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg3ei32_v_u32m2x3(uint32_t *base, vuint32m2_t bindex, vuint32m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u64m1x3(uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg3ei32_v_u64m1x3(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u64m2x3(uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg3ei32_v_u64m2x3(uint64_t *base, vuint32m1_t bindex, vuint64m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg3ei32_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg3ei32_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg3ei32_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint32m2_t b 
// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg3ei32_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f32mf2x3_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg3ei32_v_f32mf2x3_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f32m1x3_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg3ei32_v_f32m1x3_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg3ei32_v_f32m2x3_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg3ei32_v_f32m2x3_m(vbool16_t mask, float *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f64m1x3_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg3ei32_v_f64m1x3_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f64m2x3_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg3ei32_v_f64m2x3_m(vbool32_t mask, double *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg3ei32_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg3ei32_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg3ei32_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg3ei32_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg3ei32_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint32m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg3ei32_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg3ei32_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg3ei32_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg3ei32_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32mf2x3_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg3ei32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg3ei32_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg3ei32_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) 
{ @@ -600,7 +600,7 @@ void test_vsoxseg3ei32_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg3ei32_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg3ei32_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg3ei32_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg3ei32_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg3ei32_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg3ei32_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg3ei32_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg3ei32_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg3ei32_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg3ei32_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg3ei32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg3ei32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg3ei32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg3ei32_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei64.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei64.c index 43bc95f6596873..10573d1f29203e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16mf4x3(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg3ei64_v_f16mf4x3(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16mf2x3(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg3ei64_v_f16mf2x3(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16m1x3(_Float16 *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg3ei64_v_f16m1x3(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16m2x3(_Float16 *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg3ei64_v_f16m2x3(_Float16 *base, vuint64m8_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f32mf2x3(float *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg3ei64_v_f32mf2x3(float *base, vuint64m1_t bindex, vfloat32mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f32m1x3(float *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg3ei64_v_f32m1x3(float *base, vuint64m2_t bindex, vfloat32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f32m2x3(float *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg3ei64_v_f32m2x3(float *base, vuint64m4_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f64m1x3(double *base, vuint64m1_t bindex, 
vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg3ei64_v_f64m1x3(double *base, vuint64m1_t bindex, vfloat64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f64m2x3(double *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg3ei64_v_f64m2x3(double *base, vuint64m2_t bindex, vfloat64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf8x3(int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg3ei64_v_i8mf8x3(int8_t *base, vuint64m1_t bindex, vint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf4x3(int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg3ei64_v_i8mf4x3(int8_t *base, vuint64m2_t bindex, vint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf2x3(int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg3ei64_v_i8mf2x3(int8_t *base, vuint64m4_t bindex, 
vint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8m1x3(int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg3ei64_v_i8m1x3(int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16mf4x3(int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg3ei64_v_i16mf4x3(int16_t *base, vuint64m1_t bindex, vint16mf4x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16mf2x3(int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg3ei64_v_i16mf2x3(int16_t *base, vuint64m2_t bindex, vint16mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16m1x3(int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg3ei64_v_i16m1x3(int16_t *base, vuint64m4_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16m2x3(int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg3ei64_v_i16m2x3(int16_t *base, vuint64m8_t bindex, vint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32mf2x3(int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg3ei64_v_i32mf2x3(int32_t *base, vuint64m1_t bindex, vint32mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32m1x3(int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg3ei64_v_i32m1x3(int32_t *base, vuint64m2_t bindex, vint32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32m2x3(int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg3ei64_v_i32m2x3(int32_t *base, vuint64m4_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i64m1x3(int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg3ei64_v_i64m1x3(int64_t *base, vuint64m1_t bindex, vint64m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i64m2x3(int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg3ei64_v_i64m2x3(int64_t *base, vuint64m2_t bindex, vint64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf8x3(uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg3ei64_v_u8mf8x3(uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf4x3(uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg3ei64_v_u8mf4x3(uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf2x3(uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg3ei64_v_u8mf2x3(uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8m1x3(uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg3ei64_v_u8m1x3(uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16mf4x3(uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg3ei64_v_u16mf4x3(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16mf2x3(uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg3ei64_v_u16mf2x3(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16m1x3(uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg3ei64_v_u16m1x3(uint16_t *base, vuint64m4_t bindex, vuint16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16m2x3(uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg3ei64_v_u16m2x3(uint16_t *base, vuint64m8_t bindex, vuint16m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u32mf2x3(uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg3ei64_v_u32mf2x3(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u32m1x3(uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg3ei64_v_u32m1x3(uint32_t *base, vuint64m2_t bindex, vuint32m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u32m2x3(uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg3ei64_v_u32m2x3(uint32_t *base, vuint64m4_t bindex, vuint32m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u64m1x3(uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg3ei64_v_u64m1x3(uint64_t *base, vuint64m1_t bindex, vuint64m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u64m2x3(uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg3ei64_v_u64m2x3(uint64_t *base, vuint64m2_t bindex, vuint64m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg3ei64_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg3ei64_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg3ei64_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg3ei64_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f32mf2x3_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg3ei64_v_f32mf2x3_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f32m1x3_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg3ei64_v_f32m1x3_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f32m2x3_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg3ei64_v_f32m2x3_m(vbool16_t mask, float *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f64m1x3_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg3ei64_v_f64m1x3_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f64m2x3_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg3ei64_v_f64m2x3_m(vbool32_t mask, double *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf8x3_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg3ei64_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg3ei64_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg3ei64_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, 
size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg3ei64_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg3ei64_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg3ei64_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg3ei64_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg3ei64_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg3ei64_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg3ei64_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg3ei64_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg3ei64_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg3ei64_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg3ei64_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg3ei64_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg3ei64_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg3ei64_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg3ei64_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg3ei64_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint64m2_t // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg3ei64_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg3ei64_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg3ei64_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg3ei64_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg3ei64_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg3ei64_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg3ei64_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei8.c index 6eca72fd39d76c..b7aa2151a679a1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 
3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16mf4x3(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg3ei8_v_f16mf4x3(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16mf2x3(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg3ei8_v_f16mf2x3(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16m1x3(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg3ei8_v_f16m1x3(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16m2x3(_Float16 *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg3ei8_v_f16m2x3(_Float16 *base, vuint8m1_t bindex, vfloat16m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32mf2x3(float *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg3ei8_v_f32mf2x3(float *base, vuint8mf8_t bindex, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32m1x3(float *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg3ei8_v_f32m1x3(float *base, vuint8mf4_t bindex, vfloat32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32m2x3(float *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg3ei8_v_f32m2x3(float *base, vuint8mf2_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f64m1x3(double *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg3ei8_v_f64m1x3(double *base, vuint8mf8_t bindex, vfloat64m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f64m2x3(double *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg3ei8_v_f64m2x3(double *base, vuint8mf4_t bindex, vfloat64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf8x3(int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg3ei8_v_i8mf8x3(int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf4x3(int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg3ei8_v_i8mf4x3(int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf2x3(int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg3ei8_v_i8mf2x3(int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 
3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8m1x3(int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg3ei8_v_i8m1x3(int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8m2x3(int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg3ei8_v_i8m2x3(int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16mf4x3(int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg3ei8_v_i16mf4x3(int16_t *base, vuint8mf8_t bindex, vint16mf4x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16mf2x3(int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg3ei8_v_i16mf2x3(int16_t *base, vuint8mf4_t bindex, vint16mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16m1x3(int16_t 
*base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg3ei8_v_i16m1x3(int16_t *base, vuint8mf2_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16m2x3(int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg3ei8_v_i16m2x3(int16_t *base, vuint8m1_t bindex, vint16m2x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32mf2x3(int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg3ei8_v_i32mf2x3(int32_t *base, vuint8mf8_t bindex, vint32mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32m1x3(int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg3ei8_v_i32m1x3(int32_t *base, vuint8mf4_t bindex, vint32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32m2x3(int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg3ei8_v_i32m2x3(int32_t *base, 
vuint8mf2_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i64m1x3(int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg3ei8_v_i64m1x3(int64_t *base, vuint8mf8_t bindex, vint64m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i64m2x3(int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg3ei8_v_i64m2x3(int64_t *base, vuint8mf4_t bindex, vint64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf8x3(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg3ei8_v_u8mf8x3(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf4x3(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg3ei8_v_u8mf4x3(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf2x3(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg3ei8_v_u8mf2x3(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8m1x3(uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg3ei8_v_u8m1x3(uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8m2x3(uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg3ei8_v_u8m2x3(uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16mf4x3(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg3ei8_v_u16mf4x3(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16mf2x3(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg3ei8_v_u16mf2x3(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16m1x3(uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg3ei8_v_u16m1x3(uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16m2x3(uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg3ei8_v_u16m2x3(uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u32mf2x3(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg3ei8_v_u32mf2x3(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u32m1x3(uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg3ei8_v_u32m1x3(uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u32m2x3(uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg3ei8_v_u32m2x3(uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u64m1x3(uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg3ei8_v_u64m1x3(uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u64m2x3(uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg3ei8_v_u64m2x3(uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg3ei8_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg3ei8_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg3ei8_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg3ei8_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32mf2x3_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg3ei8_v_f32mf2x3_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32m1x3_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg3ei8_v_f32m1x3_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32m2x3_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg3ei8_v_f32m2x3_m(vbool16_t mask, float *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f64m1x3_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg3ei8_v_f64m1x3_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: 
define dso_local void @test_vsoxseg3ei8_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f64m2x3_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg3ei8_v_f64m2x3_m(vbool32_t mask, double *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg3ei8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg3ei8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, 
vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg3ei8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg3ei8_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg3ei8_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg3ei8_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg3ei8_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg3ei8_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg3ei8_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg3ei8_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg3ei8_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg3ei8_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg3ei8_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg3ei8_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg3ei8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg3ei8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg3ei8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void 
test_vsoxseg3ei8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg3ei8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg3ei8_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg3ei8_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg3ei8_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg3ei8_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg3ei8_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg3ei8_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg3ei8_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg3ei8_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei16.c index 1c26521c9bbde0..06fce8976a82d2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16mf4x4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg4ei16_v_f16mf4x4(_Float16 *base, vuint16mf4_t bindex, 
vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16mf2x4(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg4ei16_v_f16mf2x4(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16m1x4(_Float16 *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg4ei16_v_f16m1x4(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16m2x4(_Float16 *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg4ei16_v_f16m2x4(_Float16 *base, vuint16m2_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f32mf2x4(float *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg4ei16_v_f32mf2x4(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m1x4 // CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f32m1x4(float *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg4ei16_v_f32m1x4(float *base, vuint16mf2_t bindex, vfloat32m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f32m2x4(float *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg4ei16_v_f32m2x4(float *base, vuint16m1_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f64m1x4(double *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg4ei16_v_f64m1x4(double *base, vuint16mf4_t bindex, vfloat64m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f64m2x4(double *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg4ei16_v_f64m2x4(double *base, vuint16mf2_t bindex, vfloat64m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf8x4(int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg4ei16_v_i8mf8x4(int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf4x4(int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg4ei16_v_i8mf4x4(int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf2x4(int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg4ei16_v_i8mf2x4(int8_t *base, vuint16m1_t bindex, vint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8m1x4(int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg4ei16_v_i8m1x4(int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8m2x4(int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg4ei16_v_i8m2x4(int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16mf4x4(int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg4ei16_v_i16mf4x4(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16mf2x4(int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg4ei16_v_i16mf2x4(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16m1x4(int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg4ei16_v_i16m1x4(int16_t *base, vuint16m1_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16m2x4(int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg4ei16_v_i16m2x4(int16_t *base, vuint16m2_t bindex, vint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32mf2x4(int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg4ei16_v_i32mf2x4(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32m1x4(int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg4ei16_v_i32m1x4(int32_t *base, vuint16mf2_t bindex, vint32m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32m2x4(int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg4ei16_v_i32m2x4(int32_t *base, vuint16m1_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i64m1x4(int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg4ei16_v_i64m1x4(int64_t *base, vuint16mf4_t bindex, vint64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i64m2x4(int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg4ei16_v_i64m2x4(int64_t *base, vuint16mf2_t bindex, vint64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf8x4(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg4ei16_v_u8mf8x4(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf4x4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg4ei16_v_u8mf4x4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf2x4(uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg4ei16_v_u8mf2x4(uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8m1x4(uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg4ei16_v_u8m1x4(uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8m2x4(uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg4ei16_v_u8m2x4(uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16mf4x4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg4ei16_v_u16mf4x4(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16mf2x4(uint16_t 
*base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg4ei16_v_u16mf2x4(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16m1x4(uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg4ei16_v_u16m1x4(uint16_t *base, vuint16m1_t bindex, vuint16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16m2x4(uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg4ei16_v_u16m2x4(uint16_t *base, vuint16m2_t bindex, vuint16m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32mf2x4(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg4ei16_v_u32mf2x4(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32m1x4(uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void 
test_vsoxseg4ei16_v_u32m1x4(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32m2x4(uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg4ei16_v_u32m2x4(uint32_t *base, vuint16m1_t bindex, vuint32m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u64m1x4(uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg4ei16_v_u64m1x4(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u64m2x4(uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg4ei16_v_u64m2x4(uint64_t *base, vuint16mf2_t bindex, vuint64m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void 
test_vsoxseg4ei16_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg4ei16_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg4ei16_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg4ei16_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f32mf2x4_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg4ei16_v_f32mf2x4_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f32m1x4_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg4ei16_v_f32m1x4_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f32m2x4_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg4ei16_v_f32m2x4_m(vbool16_t mask, float *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f64m1x4_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg4ei16_v_f64m1x4_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f64m2x4_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg4ei16_v_f64m2x4_m(vbool32_t mask, double *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg4ei16_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg4ei16_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg4ei16_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg4ei16_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg4ei16_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint16m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg4ei16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg4ei16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg4ei16_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg4ei16_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg4ei16_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg4ei16_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32m1x4_m(vbool32_t mask, int32_t *base, 
vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg4ei16_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg4ei16_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg4ei16_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg4ei16_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg4ei16_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg4ei16_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg4ei16_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg4ei16_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg4ei16_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg4ei16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg4ei16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg4ei16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m2x4_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg4ei16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg4ei16_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg4ei16_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, 
vuint32m2x4_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg4ei16_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg4ei16_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei32.c index fb6eb364cf34b8..0e0d114418be14 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16mf4x4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg4ei32_v_f16mf4x4(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16mf2x4(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg4ei32_v_f16mf2x4(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16m1x4(_Float16 *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg4ei32_v_f16m1x4(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16m2x4(_Float16 *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg4ei32_v_f16m2x4(_Float16 *base, vuint32m4_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f32mf2x4(float *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg4ei32_v_f32mf2x4(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f32m1x4(float *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg4ei32_v_f32m1x4(float *base, vuint32m1_t bindex, vfloat32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f32m2x4(float *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg4ei32_v_f32m2x4(float *base, vuint32m2_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f64m1x4(double *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg4ei32_v_f64m1x4(double *base, vuint32mf2_t bindex, vfloat64m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f64m2x4(double *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg4ei32_v_f64m2x4(double *base, vuint32m1_t bindex, vfloat64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf8x4(int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg4ei32_v_i8mf8x4(int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf4x4(int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg4ei32_v_i8mf4x4(int8_t *base, vuint32m1_t bindex, vint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf2x4(int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg4ei32_v_i8mf2x4(int8_t *base, vuint32m2_t bindex, vint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8m1x4(int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg4ei32_v_i8m1x4(int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8m2x4(int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg4ei32_v_i8m2x4(int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16mf4x4(int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg4ei32_v_i16mf4x4(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16mf2x4(int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg4ei32_v_i16mf2x4(int16_t *base, vuint32m1_t bindex, vint16mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16m1x4(int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg4ei32_v_i16m1x4(int16_t *base, vuint32m2_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16m2x4(int16_t *base, 
vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg4ei32_v_i16m2x4(int16_t *base, vuint32m4_t bindex, vint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32mf2x4(int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg4ei32_v_i32mf2x4(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32m1x4(int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg4ei32_v_i32m1x4(int32_t *base, vuint32m1_t bindex, vint32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32m2x4(int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg4ei32_v_i32m2x4(int32_t *base, vuint32m2_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i64m1x4(int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg4ei32_v_i64m1x4(int64_t 
*base, vuint32mf2_t bindex, vint64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i64m2x4(int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg4ei32_v_i64m2x4(int64_t *base, vuint32m1_t bindex, vint64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf8x4(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg4ei32_v_u8mf8x4(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf4x4(uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg4ei32_v_u8mf4x4(uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf2x4(uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg4ei32_v_u8mf2x4(uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m1x4 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8m1x4(uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg4ei32_v_u8m1x4(uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8m2x4(uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg4ei32_v_u8m2x4(uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16mf4x4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg4ei32_v_u16mf4x4(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16mf2x4(uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg4ei32_v_u16mf2x4(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16m1x4(uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg4ei32_v_u16m1x4(uint16_t *base, vuint32m2_t bindex, vuint16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16m2x4(uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg4ei32_v_u16m2x4(uint16_t *base, vuint32m4_t bindex, vuint16m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32mf2x4(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg4ei32_v_u32mf2x4(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32m1x4(uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg4ei32_v_u32m1x4(uint32_t *base, vuint32m1_t bindex, vuint32m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32m2x4(uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg4ei32_v_u32m2x4(uint32_t *base, vuint32m2_t bindex, vuint32m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u64m1x4(uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg4ei32_v_u64m1x4(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u64m2x4(uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg4ei32_v_u64m2x4(uint64_t *base, vuint32m1_t bindex, vuint64m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg4ei32_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg4ei32_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg4ei32_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg4ei32_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f32mf2x4_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg4ei32_v_f32mf2x4_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m1x4_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f32m1x4_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg4ei32_v_f32m1x4_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f32m2x4_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg4ei32_v_f32m2x4_m(vbool16_t mask, float *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f64m1x4_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg4ei32_v_f64m1x4_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f64m2x4_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x4_t 
v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg4ei32_v_f64m2x4_m(vbool32_t mask, double *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg4ei32_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg4ei32_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg4ei32_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg4ei32_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg4ei32_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint32m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg4ei32_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg4ei32_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg4ei32_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg4ei32_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg4ei32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg4ei32_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg4ei32_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg4ei32_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg4ei32_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg4ei32_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg4ei32_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg4ei32_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg4ei32_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg4ei32_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg4ei32_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg4ei32_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg4ei32_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg4ei32_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg4ei32_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg4ei32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg4ei32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg4ei32_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg4ei32_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei64.c index 139e2f7e8ab910..0553f93d56d957 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16mf4x4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg4ei64_v_f16mf4x4(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16mf2x4(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, 
size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg4ei64_v_f16mf2x4(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16m1x4(_Float16 *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg4ei64_v_f16m1x4(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16m2x4(_Float16 *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg4ei64_v_f16m2x4(_Float16 *base, vuint64m8_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32mf2x4(float *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg4ei64_v_f32mf2x4(float *base, vuint64m1_t bindex, vfloat32mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32m1x4(float *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg4ei64_v_f32m1x4(float *base, vuint64m2_t bindex, vfloat32m1x4_t // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32m2x4(float *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg4ei64_v_f32m2x4(float *base, vuint64m4_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f64m1x4(double *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg4ei64_v_f64m1x4(double *base, vuint64m1_t bindex, vfloat64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f64m2x4(double *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg4ei64_v_f64m2x4(double *base, vuint64m2_t bindex, vfloat64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf8x4(int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg4ei64_v_i8mf8x4(int8_t *base, vuint64m1_t bindex, vint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf4x4(int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg4ei64_v_i8mf4x4(int8_t *base, vuint64m2_t bindex, vint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf2x4(int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg4ei64_v_i8mf2x4(int8_t *base, vuint64m4_t bindex, vint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8m1x4(int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg4ei64_v_i8m1x4(int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16mf4x4(int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg4ei64_v_i16mf4x4(int16_t *base, vuint64m1_t bindex, vint16mf4x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16mf2x4(int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg4ei64_v_i16mf2x4(int16_t *base, vuint64m2_t bindex, vint16mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16m1x4(int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg4ei64_v_i16m1x4(int16_t *base, vuint64m4_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16m2x4(int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg4ei64_v_i16m2x4(int16_t *base, vuint64m8_t bindex, vint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32mf2x4(int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg4ei64_v_i32mf2x4(int32_t *base, vuint64m1_t bindex, vint32mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32m1x4(int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg4ei64_v_i32m1x4(int32_t *base, vuint64m2_t bindex, vint32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32m2x4(int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg4ei64_v_i32m2x4(int32_t *base, vuint64m4_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i64m1x4(int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg4ei64_v_i64m1x4(int64_t *base, vuint64m1_t bindex, vint64m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i64m2x4(int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg4ei64_v_i64m2x4(int64_t *base, vuint64m2_t bindex, vint64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf8x4(uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg4ei64_v_u8mf8x4(uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf4x4(uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg4ei64_v_u8mf4x4(uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf2x4(uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg4ei64_v_u8mf2x4(uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8m1x4(uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg4ei64_v_u8m1x4(uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16mf4x4(uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg4ei64_v_u16mf4x4(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16mf2x4(uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg4ei64_v_u16mf2x4(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16m1x4(uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg4ei64_v_u16m1x4(uint16_t *base, vuint64m4_t bindex, vuint16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16m2x4(uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg4ei64_v_u16m2x4(uint16_t *base, vuint64m8_t bindex, vuint16m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32mf2x4(uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg4ei64_v_u32mf2x4(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32m1x4(uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg4ei64_v_u32m1x4(uint32_t *base, vuint64m2_t bindex, vuint32m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32m2x4(uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg4ei64_v_u32m2x4(uint32_t *base, vuint64m4_t bindex, vuint32m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u64m1x4(uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg4ei64_v_u64m1x4(uint64_t *base, vuint64m1_t bindex, vuint64m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg4ei64_v_u64m2x4(uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg4ei64_v_u64m2x4(uint64_t *base, vuint64m2_t bindex, vuint64m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg4ei64_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg4ei64_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg4ei64_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg4ei64_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32mf2x4_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg4ei64_v_f32mf2x4_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32m1x4_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg4ei64_v_f32m1x4_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32m2x4_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg4ei64_v_f32m2x4_m(vbool16_t mask, float *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f64m1x4_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg4ei64_v_f64m1x4_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f64m2x4_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg4ei64_v_f64m2x4_m(vbool32_t mask, double *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg4ei64_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg4ei64_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf2x4_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg4ei64_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg4ei64_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg4ei64_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ 
-510,7 +510,7 @@ void test_vsoxseg4ei64_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg4ei64_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg4ei64_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg4ei64_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg4ei64_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg4ei64_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg4ei64_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg4ei64_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg4ei64_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg4ei64_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg4ei64_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg4ei64_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg4ei64_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg4ei64_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg4ei64_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg4ei64_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg4ei64_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg4ei64_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg4ei64_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg4ei64_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u64m1x4_m(vbool64_t 
mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg4ei64_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei8.c index df9497d2d3a788..436cc39d0ab4f2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16mf4x4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg4ei8_v_f16mf4x4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16mf2x4(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg4ei8_v_f16mf2x4(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16m1x4(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg4ei8_v_f16m1x4(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16m2x4(_Float16 *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg4ei8_v_f16m2x4(_Float16 *base, vuint8m1_t bindex, vfloat16m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32mf2x4(float *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg4ei8_v_f32mf2x4(float *base, vuint8mf8_t bindex, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32m1x4(float *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg4ei8_v_f32m1x4(float *base, vuint8mf4_t bindex, vfloat32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32m2x4(float *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg4ei8_v_f32m2x4(float *base, vuint8mf2_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f64m1x4(double *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg4ei8_v_f64m1x4(double *base, vuint8mf8_t bindex, vfloat64m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f64m2x4(double *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg4ei8_v_f64m2x4(double *base, vuint8mf4_t bindex, vfloat64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf8x4(int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg4ei8_v_i8mf8x4(int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf4x4(int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg4ei8_v_i8mf4x4(int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf2x4(int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg4ei8_v_i8mf2x4(int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8m1x4(int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg4ei8_v_i8m1x4(int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8m2x4(int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg4ei8_v_i8m2x4(int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16mf4x4(int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg4ei8_v_i16mf4x4(int16_t *base, vuint8mf8_t bindex, vint16mf4x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16mf2x4(int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg4ei8_v_i16mf2x4(int16_t *base, vuint8mf4_t bindex, vint16mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16m1x4(int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg4ei8_v_i16m1x4(int16_t *base, vuint8mf2_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16m2x4(int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg4ei8_v_i16m2x4(int16_t *base, vuint8m1_t bindex, vint16m2x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i32mf2x4(int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t 
v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg4ei8_v_i32mf2x4(int32_t *base, vuint8mf8_t bindex, vint32mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i32m1x4(int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg4ei8_v_i32m1x4(int32_t *base, vuint8mf4_t bindex, vint32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i32m2x4(int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg4ei8_v_i32m2x4(int32_t *base, vuint8mf2_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i64m1x4(int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg4ei8_v_i64m1x4(int64_t *base, vuint8mf8_t bindex, vint64m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i64m2x4(int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg4ei8_v_i64m2x4(int64_t *base, vuint8mf4_t bindex, vint64m2x4_t // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf8x4(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg4ei8_v_u8mf8x4(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf4x4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg4ei8_v_u8mf4x4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf2x4(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg4ei8_v_u8mf2x4(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8m1x4(uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg4ei8_v_u8m1x4(uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8m2x4(uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg4ei8_v_u8m2x4(uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16mf4x4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg4ei8_v_u16mf4x4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16mf2x4(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg4ei8_v_u16mf2x4(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16m1x4(uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg4ei8_v_u16m1x4(uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16m2x4(uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg4ei8_v_u16m2x4(uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32mf2x4(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg4ei8_v_u32mf2x4(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32m1x4(uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg4ei8_v_u32m1x4(uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32m2x4(uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg4ei8_v_u32m2x4(uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u64m1x4(uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg4ei8_v_u64m1x4(uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u64m2x4(uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg4ei8_v_u64m2x4(uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg4ei8_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg4ei8_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg4ei8_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg4ei8_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32mf2x4_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg4ei8_v_f32mf2x4_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32m1x4_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg4ei8_v_f32m1x4_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32m2x4_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg4ei8_v_f32m2x4_m(vbool16_t mask, float *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f64m1x4_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg4ei8_v_f64m1x4_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f64m2x4_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg4ei8_v_f64m2x4_m(vbool32_t mask, double *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void 
test_vsoxseg4ei8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg4ei8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg4ei8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg4ei8_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // 
CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg4ei8_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg4ei8_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg4ei8_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg4ei8_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg4ei8_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg4ei8_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg4ei8_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg4ei8_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg4ei8_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg4ei8_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg4ei8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg4ei8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg4ei8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg4ei8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg4ei8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void 
test_vsoxseg4ei8_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg4ei8_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg4ei8_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg4ei8_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg4ei8_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg4ei8_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg4ei8_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg4ei8_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei16.c index 0daa4d28936989..bf98cdf799ba45 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f16mf4x5(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg5ei16_v_f16mf4x5(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f16mf2x5(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg5ei16_v_f16mf2x5(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f16m1x5(_Float16 *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg5ei16_v_f16m1x5(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f32mf2x5(float *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg5ei16_v_f32mf2x5(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f32m1x5(float *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg5ei16_v_f32m1x5(float *base, vuint16mf2_t bindex, vfloat32m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f64m1x5(double *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg5ei16_v_f64m1x5(double *base, vuint16mf4_t bindex, vfloat64m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf8x5(int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg5ei16_v_i8mf8x5(int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf4x5(int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg5ei16_v_i8mf4x5(int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf2x5(int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg5ei16_v_i8mf2x5(int8_t *base, vuint16m1_t bindex, vint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8m1x5(int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg5ei16_v_i8m1x5(int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16mf4x5(int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg5ei16_v_i16mf4x5(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16mf2x5(int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg5ei16_v_i16mf2x5(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16m1x5(int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg5ei16_v_i16m1x5(int16_t *base, vuint16m1_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i32mf2x5(int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg5ei16_v_i32mf2x5(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i32m1x5(int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg5ei16_v_i32m1x5(int32_t *base, vuint16mf2_t bindex, vint32m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i64m1x5(int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg5ei16_v_i64m1x5(int64_t *base, vuint16mf4_t bindex, vint64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf8x5(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg5ei16_v_u8mf8x5(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf4x5(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg5ei16_v_u8mf4x5(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf2x5(uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg5ei16_v_u8mf2x5(uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8m1x5(uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg5ei16_v_u8m1x5(uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u16mf4x5(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg5ei16_v_u16mf4x5(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u16mf2x5(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg5ei16_v_u16mf2x5(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u16m1x5(uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg5ei16_v_u16m1x5(uint16_t *base, vuint16m1_t bindex, vuint16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg5ei16_v_u32mf2x5(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg5ei16_v_u32mf2x5(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u32m1x5(uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg5ei16_v_u32m1x5(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u64m1x5(uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg5ei16_v_u64m1x5(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg5ei16_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) 
// CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg5ei16_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg5ei16_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f32mf2x5_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg5ei16_v_f32mf2x5_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f32m1x5_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg5ei16_v_f32m1x5_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f64m1x5_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg5ei16_v_f64m1x5_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg5ei16_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg5ei16_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg5ei16_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg5ei16_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg5ei16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg5ei16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg5ei16_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32mf2x5_m 
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg5ei16_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg5ei16_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg5ei16_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, 
vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg5ei16_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg5ei16_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg5ei16_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg5ei16_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg5ei16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg5ei16_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg5ei16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg5ei16_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg5ei16_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei32.c index 4f06f94a4e6d56..c3c2ab27fb3a8d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16mf4x5(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg5ei32_v_f16mf4x5(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16mf2x5(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x5_t 
v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg5ei32_v_f16mf2x5(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16m1x5(_Float16 *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg5ei32_v_f16m1x5(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f32mf2x5(float *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg5ei32_v_f32mf2x5(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f32m1x5(float *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg5ei32_v_f32m1x5(float *base, vuint32m1_t bindex, vfloat32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f64m1x5(double *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg5ei32_v_f64m1x5(double *base, vuint32mf2_t bindex, vfloat64m1x5 
// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf8x5(int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg5ei32_v_i8mf8x5(int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf4x5(int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg5ei32_v_i8mf4x5(int8_t *base, vuint32m1_t bindex, vint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf2x5(int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg5ei32_v_i8mf2x5(int8_t *base, vuint32m2_t bindex, vint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8m1x5(int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg5ei32_v_i8m1x5(int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16mf4x5(int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg5ei32_v_i16mf4x5(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16mf2x5(int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg5ei32_v_i16mf2x5(int16_t *base, vuint32m1_t bindex, vint16mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16m1x5(int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg5ei32_v_i16m1x5(int16_t *base, vuint32m2_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i32mf2x5(int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg5ei32_v_i32mf2x5(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i32m1x5(int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg5ei32_v_i32m1x5(int32_t *base, vuint32m1_t bindex, vint32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i64m1x5(int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg5ei32_v_i64m1x5(int64_t *base, vuint32mf2_t bindex, vint64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf8x5(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg5ei32_v_u8mf8x5(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf4x5(uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg5ei32_v_u8mf4x5(uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf2x5(uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg5ei32_v_u8mf2x5(uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8m1x5(uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg5ei32_v_u8m1x5(uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16mf4x5(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg5ei32_v_u16mf4x5(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16mf2x5(uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg5ei32_v_u16mf2x5(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16m1x5(uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg5ei32_v_u16m1x5(uint16_t *base, vuint32m2_t bindex, vuint16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u32mf2x5(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg5ei32_v_u32mf2x5(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u32m1x5(uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg5ei32_v_u32m1x5(uint32_t *base, vuint32m1_t bindex, vuint32m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u64m1x5(uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg5ei32_v_u64m1x5(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg5ei32_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg5ei32_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg5ei32_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f32mf2x5_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg5ei32_v_f32mf2x5_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f32m1x5_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg5ei32_v_f32m1x5_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f64m1x5_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg5ei32_v_f64m1x5_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg5ei32_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg5ei32_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf2x5_m // CHECK-RV64-SAME: 
( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg5ei32_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg5ei32_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg5ei32_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) { 
@@ -390,7 +390,7 @@ void test_vsoxseg5ei32_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg5ei32_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg5ei32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg5ei32_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg5ei32_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg5ei32_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg5ei32_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg5ei32_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg5ei32_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg5ei32_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg5ei32_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg5ei32_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg5ei32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg5ei32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei64.c index 4de616b8ad37c3..4476e538e80be5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f16mf4x5(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg5ei64_v_f16mf4x5(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f16mf2x5(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg5ei64_v_f16mf2x5(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f16m1x5(_Float16 *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg5ei64_v_f16m1x5(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f32mf2x5(float *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg5ei64_v_f32mf2x5(float *base, vuint64m1_t bindex, vfloat32mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f32m1x5(float *base, 
vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg5ei64_v_f32m1x5(float *base, vuint64m2_t bindex, vfloat32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f64m1x5(double *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg5ei64_v_f64m1x5(double *base, vuint64m1_t bindex, vfloat64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf8x5(int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg5ei64_v_i8mf8x5(int8_t *base, vuint64m1_t bindex, vint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf4x5(int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg5ei64_v_i8mf4x5(int8_t *base, vuint64m2_t bindex, vint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf2x5(int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg5ei64_v_i8mf2x5(int8_t *base, 
vuint64m4_t bindex, vint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8m1x5(int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg5ei64_v_i8m1x5(int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i16mf4x5(int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg5ei64_v_i16mf4x5(int16_t *base, vuint64m1_t bindex, vint16mf4x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i16mf2x5(int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg5ei64_v_i16mf2x5(int16_t *base, vuint64m2_t bindex, vint16mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i16m1x5(int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg5ei64_v_i16m1x5(int16_t *base, vuint64m4_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32mf2x5 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i32mf2x5(int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg5ei64_v_i32mf2x5(int32_t *base, vuint64m1_t bindex, vint32mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i32m1x5(int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg5ei64_v_i32m1x5(int32_t *base, vuint64m2_t bindex, vint32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i64m1x5(int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg5ei64_v_i64m1x5(int64_t *base, vuint64m1_t bindex, vint64m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf8x5(uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg5ei64_v_u8mf8x5(uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf4x5(uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg5ei64_v_u8mf4x5(uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf2x5(uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg5ei64_v_u8mf2x5(uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8m1x5(uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg5ei64_v_u8m1x5(uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16mf4x5(uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg5ei64_v_u16mf4x5(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16mf2x5(uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg5ei64_v_u16mf2x5(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16m1x5(uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg5ei64_v_u16m1x5(uint16_t *base, vuint64m4_t bindex, vuint16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u32mf2x5(uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg5ei64_v_u32mf2x5(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u32m1x5(uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg5ei64_v_u32m1x5(uint32_t *base, vuint64m2_t bindex, vuint32m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u64m1x5(uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg5ei64_v_u64m1x5(uint64_t *base, vuint64m1_t bindex, vuint64m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg5ei64_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg5ei64_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg5ei64_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f32mf2x5_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg5ei64_v_f32mf2x5_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f32m1x5_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg5ei64_v_f32m1x5_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f64m1x5_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg5ei64_v_f64m1x5_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg5ei64_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg5ei64_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg5ei64_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg5ei64_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg5ei64_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i16mf4x5_m(vbool64_t mask, int16_t *base, 
vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg5ei64_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg5ei64_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg5ei64_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg5ei64_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg5ei64_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg5ei64_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg5ei64_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg5ei64_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg5ei64_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg5ei64_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg5ei64_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg5ei64_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16m1x5_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg5ei64_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg5ei64_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg5ei64_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t 
vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei8.c index 291745f767f448..f33b8d18f4e168 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f16mf4x5(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg5ei8_v_f16mf4x5(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f16mf2x5(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg5ei8_v_f16mf2x5(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f16m1x5(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg5ei8_v_f16m1x5(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f32mf2x5(float *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg5ei8_v_f32mf2x5(float *base, vuint8mf8_t bindex, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f32m1x5(float *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg5ei8_v_f32m1x5(float *base, vuint8mf4_t bindex, vfloat32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f64m1x5(double *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg5ei8_v_f64m1x5(double *base, vuint8mf8_t bindex, vfloat64m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8mf8x5(int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg5ei8_v_i8mf8x5(int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8mf4x5(int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg5ei8_v_i8mf4x5(int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8mf2x5(int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg5ei8_v_i8mf2x5(int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8m1x5(int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg5ei8_v_i8m1x5(int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16mf4x5(int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg5ei8_v_i16mf4x5(int16_t *base, vuint8mf8_t bindex, vint16mf4x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16mf2x5(int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, 
size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg5ei8_v_i16mf2x5(int16_t *base, vuint8mf4_t bindex, vint16mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16m1x5(int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg5ei8_v_i16m1x5(int16_t *base, vuint8mf2_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i32mf2x5(int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg5ei8_v_i32mf2x5(int32_t *base, vuint8mf8_t bindex, vint32mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i32m1x5(int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg5ei8_v_i32m1x5(int32_t *base, vuint8mf4_t bindex, vint32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i64m1x5(int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg5ei8_v_i64m1x5(int64_t *base, vuint8mf8_t bindex, vint64m1x5_t // CHECK-RV64-LABEL: 
define dso_local void @test_vsoxseg5ei8_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf8x5(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg5ei8_v_u8mf8x5(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf4x5(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg5ei8_v_u8mf4x5(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf2x5(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg5ei8_v_u8mf2x5(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8m1x5(uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg5ei8_v_u8m1x5(uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u16mf4x5(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg5ei8_v_u16mf4x5(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u16mf2x5(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg5ei8_v_u16mf2x5(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u16m1x5(uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg5ei8_v_u16m1x5(uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u32mf2x5(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg5ei8_v_u32mf2x5(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u32m1x5(uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg5ei8_v_u32m1x5(uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u64m1x5(uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg5ei8_v_u64m1x5(uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg5ei8_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg5ei8_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg5ei8_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f32mf2x5_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg5ei8_v_f32mf2x5_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f32m1x5_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg5ei8_v_f32m1x5_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f64m1x5_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg5ei8_v_f64m1x5_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg5ei8_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg5ei8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg5ei8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg5ei8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint8m1_t 
bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg5ei8_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg5ei8_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg5ei8_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg5ei8_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg5ei8_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg5ei8_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg5ei8_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg5ei8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg5ei8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg5ei8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg5ei8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg5ei8_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg5ei8_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg5ei8_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg5ei8_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ 
void test_vsoxseg5ei8_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei16.c index 7c155a4cdbba1a..b6fb3815045149 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16mf4x6(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg6ei16_v_f16mf4x6(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16mf2x6(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg6ei16_v_f16mf2x6(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16m1x6(_Float16 *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg6ei16_v_f16m1x6(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f32mf2x6(float *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg6ei16_v_f32mf2x6(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f32m1x6(float *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg6ei16_v_f32m1x6(float *base, vuint16mf2_t bindex, vfloat32m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f64m1x6(double *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg6ei16_v_f64m1x6(double *base, vuint16mf4_t bindex, vfloat64m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 
6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf8x6(int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg6ei16_v_i8mf8x6(int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf4x6(int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg6ei16_v_i8mf4x6(int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf2x6(int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg6ei16_v_i8mf2x6(int8_t *base, vuint16m1_t bindex, vint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8m1x6(int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg6ei16_v_i8m1x6(int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg6ei16_v_i16mf4x6(int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg6ei16_v_i16mf4x6(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i16mf2x6(int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg6ei16_v_i16mf2x6(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i16m1x6(int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg6ei16_v_i16m1x6(int16_t *base, vuint16m1_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i32mf2x6(int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg6ei16_v_i32mf2x6(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i32m1x6(int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 
+160,7 @@ void test_vsoxseg6ei16_v_i32m1x6(int32_t *base, vuint16mf2_t bindex, vint32m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i64m1x6(int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg6ei16_v_i64m1x6(int64_t *base, vuint16mf4_t bindex, vint64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf8x6(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg6ei16_v_u8mf8x6(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf4x6(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg6ei16_v_u8mf4x6(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf2x6(uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg6ei16_v_u8mf2x6(uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_ // CHECK-RV64-LABEL: 
define dso_local void @test_vsoxseg6ei16_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8m1x6(uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg6ei16_v_u8m1x6(uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16mf4x6(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg6ei16_v_u16mf4x6(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16mf2x6(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg6ei16_v_u16mf2x6(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16m1x6(uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg6ei16_v_u16m1x6(uint16_t *base, vuint16m1_t bindex, vuint16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u32mf2x6(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg6ei16_v_u32mf2x6(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u32m1x6(uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg6ei16_v_u32m1x6(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u64m1x6(uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg6ei16_v_u64m1x6(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg6ei16_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg6ei16_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg6ei16_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f32mf2x6_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg6ei16_v_f32mf2x6_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f32m1x6_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void 
test_vsoxseg6ei16_v_f32m1x6_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f64m1x6_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg6ei16_v_f64m1x6_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg6ei16_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg6ei16_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg6ei16_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg6ei16_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg6ei16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg6ei16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], 
i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg6ei16_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg6ei16_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg6ei16_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg6ei16_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg6ei16_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg6ei16_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg6ei16_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg6ei16_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg6ei16_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg6ei16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg6ei16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg6ei16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u32mf2x6_m(vbool64_t mask, 
uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg6ei16_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg6ei16_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei32.c index 71c7d85c6f1213..9da707d008fa1c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16mf4x6(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg6ei32_v_f16mf4x6(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16mf2x6(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg6ei32_v_f16mf2x6(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16m1x6(_Float16 *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg6ei32_v_f16m1x6(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f32mf2x6(float *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg6ei32_v_f32mf2x6(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f32m1x6(float *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg6ei32_v_f32m1x6(float *base, vuint32m1_t bindex, vfloat32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f64m1x6(double *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg6ei32_v_f64m1x6(double *base, vuint32mf2_t bindex, vfloat64m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf8x6(int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg6ei32_v_i8mf8x6(int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf4x6(int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg6ei32_v_i8mf4x6(int8_t *base, vuint32m1_t bindex, vint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf2x6(int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg6ei32_v_i8mf2x6(int8_t *base, vuint32m2_t bindex, vint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8m1x6(int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg6ei32_v_i8m1x6(int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i16mf4x6(int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg6ei32_v_i16mf4x6(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i16mf2x6(int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg6ei32_v_i16mf2x6(int16_t *base, vuint32m1_t bindex, vint16mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i16m1x6(int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg6ei32_v_i16m1x6(int16_t *base, vuint32m2_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i32mf2x6(int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg6ei32_v_i32mf2x6(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i32m1x6(int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg6ei32_v_i32m1x6(int32_t *base, vuint32m1_t bindex, vint32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i64m1x6(int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg6ei32_v_i64m1x6(int64_t *base, vuint32mf2_t bindex, vint64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf8x6(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg6ei32_v_u8mf8x6(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf4x6(uint8_t 
*base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg6ei32_v_u8mf4x6(uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf2x6(uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg6ei32_v_u8mf2x6(uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8m1x6(uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg6ei32_v_u8m1x6(uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16mf4x6(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg6ei32_v_u16mf4x6(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16mf2x6(uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void 
test_vsoxseg6ei32_v_u16mf2x6(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16m1x6(uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg6ei32_v_u16m1x6(uint16_t *base, vuint32m2_t bindex, vuint16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u32mf2x6(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg6ei32_v_u32mf2x6(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u32m1x6(uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg6ei32_v_u32m1x6(uint32_t *base, vuint32m1_t bindex, vuint32m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u64m1x6(uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg6ei32_v_u64m1x6(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg6ei32_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg6ei32_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg6ei32_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg6ei32_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg6ei32_v_f32mf2x6_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg6ei32_v_f32mf2x6_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f32m1x6_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg6ei32_v_f32m1x6_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f64m1x6_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg6ei32_v_f64m1x6_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg6ei32_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg6ei32_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg6ei32_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg6ei32_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg6ei32_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg6ei32_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg6ei32_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg6ei32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg6ei32_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i64m1x6_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg6ei32_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg6ei32_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg6ei32_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) 
{ @@ -460,7 +460,7 @@ void test_vsoxseg6ei32_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg6ei32_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg6ei32_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg6ei32_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg6ei32_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg6ei32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg6ei32_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei64.c index 004a7fbeb435b6..0c5e06ddef758c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf4x6 
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16mf4x6(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg6ei64_v_f16mf4x6(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16mf2x6(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg6ei64_v_f16mf2x6(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16m1x6(_Float16 *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg6ei64_v_f16m1x6(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f32mf2x6(float *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg6ei64_v_f32mf2x6(float *base, vuint64m1_t bindex, vfloat32mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) 
[[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f32m1x6(float *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg6ei64_v_f32m1x6(float *base, vuint64m2_t bindex, vfloat32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f64m1x6(double *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg6ei64_v_f64m1x6(double *base, vuint64m1_t bindex, vfloat64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf8x6(int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg6ei64_v_i8mf8x6(int8_t *base, vuint64m1_t bindex, vint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf4x6(int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg6ei64_v_i8mf4x6(int8_t *base, vuint64m2_t bindex, vint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf2x6(int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg6ei64_v_i8mf2x6(int8_t *base, vuint64m4_t bindex, vint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8m1x6(int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg6ei64_v_i8m1x6(int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16mf4x6(int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg6ei64_v_i16mf4x6(int16_t *base, vuint64m1_t bindex, vint16mf4x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16mf2x6(int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg6ei64_v_i16mf2x6(int16_t *base, vuint64m2_t bindex, vint16mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16m1x6(int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg6ei64_v_i16m1x6(int16_t *base, vuint64m4_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i32mf2x6(int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg6ei64_v_i32mf2x6(int32_t *base, vuint64m1_t bindex, vint32mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i32m1x6(int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg6ei64_v_i32m1x6(int32_t *base, vuint64m2_t bindex, vint32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i64m1x6(int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg6ei64_v_i64m1x6(int64_t *base, vuint64m1_t bindex, vint64m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8mf8x6(uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg6ei64_v_u8mf8x6(uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8mf4x6(uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg6ei64_v_u8mf4x6(uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8mf2x6(uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg6ei64_v_u8mf2x6(uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8m1x6(uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg6ei64_v_u8m1x6(uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16mf4x6(uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg6ei64_v_u16mf4x6(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16mf2x6(uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg6ei64_v_u16mf2x6(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16m1x6(uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg6ei64_v_u16m1x6(uint16_t *base, vuint64m4_t bindex, vuint16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u32mf2x6(uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg6ei64_v_u32mf2x6(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg6ei64_v_u32m1x6(uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg6ei64_v_u32m1x6(uint32_t *base, vuint64m2_t bindex, vuint32m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u64m1x6(uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg6ei64_v_u64m1x6(uint64_t *base, vuint64m1_t bindex, vuint64m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg6ei64_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg6ei64_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", 
, 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg6ei64_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f32mf2x6_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg6ei64_v_f32mf2x6_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f32m1x6_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg6ei64_v_f32m1x6_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f64m1x6_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg6ei64_v_f64m1x6_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg6ei64_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg6ei64_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg6ei64_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg6ei64_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg6ei64_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg6ei64_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg6ei64_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg6ei64_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint64m1_t b // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg6ei64_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg6ei64_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg6ei64_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg6ei64_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg6ei64_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg6ei64_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg6ei64_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg6ei64_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg6ei64_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg6ei64_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg6ei64_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg6ei64_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei8.c index 568af717287de4..d5224badfbc84a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f16mf4x6(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg6ei8_v_f16mf4x6(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f16mf2x6(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg6ei8_v_f16mf2x6(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f16m1x6(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg6ei8_v_f16m1x6(_Float16 *base, vuint8mf2_t bindex, 
vfloat16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f32mf2x6(float *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg6ei8_v_f32mf2x6(float *base, vuint8mf8_t bindex, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f32m1x6(float *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg6ei8_v_f32m1x6(float *base, vuint8mf4_t bindex, vfloat32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f64m1x6(double *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg6ei8_v_f64m1x6(double *base, vuint8mf8_t bindex, vfloat64m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf8x6(int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg6ei8_v_i8mf8x6(int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf4x6(int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg6ei8_v_i8mf4x6(int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf2x6(int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg6ei8_v_i8mf2x6(int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8m1x6(int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg6ei8_v_i8m1x6(int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i16mf4x6(int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg6ei8_v_i16mf4x6(int16_t *base, vuint8mf8_t bindex, vint16mf4x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i16mf2x6(int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg6ei8_v_i16mf2x6(int16_t *base, vuint8mf4_t bindex, vint16mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i16m1x6(int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg6ei8_v_i16m1x6(int16_t *base, vuint8mf2_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i32mf2x6(int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg6ei8_v_i32mf2x6(int32_t *base, vuint8mf8_t bindex, vint32mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i32m1x6(int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg6ei8_v_i32m1x6(int32_t *base, vuint8mf4_t bindex, vint32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i64m1x6(int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg6ei8_v_i64m1x6(int64_t *base, vuint8mf8_t bindex, vint64m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf8x6(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg6ei8_v_u8mf8x6(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf4x6(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg6ei8_v_u8mf4x6(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf2x6(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg6ei8_v_u8mf2x6(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8m1x6(uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg6ei8_v_u8m1x6(uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u16mf4x6(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg6ei8_v_u16mf4x6(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u16mf2x6(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg6ei8_v_u16mf2x6(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u16m1x6(uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg6ei8_v_u16m1x6(uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u32mf2x6(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg6ei8_v_u32mf2x6(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u32m1x6(uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg6ei8_v_u32m1x6(uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u64m1x6(uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg6ei8_v_u64m1x6(uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg6ei8_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg6ei8_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg6ei8_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f32mf2x6_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg6ei8_v_f32mf2x6_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f32m1x6_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg6ei8_v_f32m1x6_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f64m1x6_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg6ei8_v_f64m1x6_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg6ei8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg6ei8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg6ei8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg6ei8_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg6ei8_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg6ei8_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg6ei8_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg6ei8_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg6ei8_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg6ei8_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg6ei8_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint8mf8_t 
bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg6ei8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg6ei8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg6ei8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg6ei8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg6ei8_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg6ei8_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg6ei8_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg6ei8_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg6ei8_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei16.c index 602816ca1691ff..857b9afca051f9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16mf4x7(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg7ei16_v_f16mf4x7(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16mf2x7(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg7ei16_v_f16mf2x7(_Float16 *base, vuint16mf2_t bindex, vfloat16m // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16m1x7(_Float16 *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg7ei16_v_f16m1x7(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f32mf2x7(float *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg7ei16_v_f32mf2x7(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f32m1x7(float *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg7ei16_v_f32m1x7(float *base, vuint16mf2_t bindex, vfloat32m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f64m1x7(double *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg7ei16_v_f64m1x7(double *base, vuint16mf4_t bindex, vfloat64m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf8x7(int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg7ei16_v_i8mf8x7(int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf4x7(int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg7ei16_v_i8mf4x7(int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf2x7(int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg7ei16_v_i8mf2x7(int8_t *base, vuint16m1_t bindex, vint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8m1x7(int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg7ei16_v_i8m1x7(int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i16mf4x7(int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg7ei16_v_i16mf4x7(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i16mf2x7(int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg7ei16_v_i16mf2x7(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i16m1x7(int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg7ei16_v_i16m1x7(int16_t *base, vuint16m1_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i32mf2x7(int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg7ei16_v_i32mf2x7(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i32m1x7(int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg7ei16_v_i32m1x7(int32_t *base, vuint16mf2_t bindex, vint32m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i64m1x7(int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg7ei16_v_i64m1x7(int64_t *base, vuint16mf4_t bindex, vint64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf8x7(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg7ei16_v_u8mf8x7(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf4x7(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg7ei16_v_u8mf4x7(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf2x7(uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg7ei16_v_u8mf2x7(uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8m1x7(uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg7ei16_v_u8m1x7(uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16mf4x7(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg7ei16_v_u16mf4x7(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16mf2x7(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg7ei16_v_u16mf2x7(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16m1x7(uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg7ei16_v_u16m1x7(uint16_t *base, vuint16m1_t bindex, vuint16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u32mf2x7(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg7ei16_v_u32mf2x7(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u32m1x7(uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg7ei16_v_u32m1x7(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u64m1x7(uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg7ei16_v_u64m1x7(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg7ei16_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg7ei16_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg7ei16_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f32mf2x7_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg7ei16_v_f32mf2x7_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f32m1x7_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg7ei16_v_f32m1x7_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f64m1x7_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg7ei16_v_f64m1x7_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg7ei16_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg7ei16_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf2x7_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg7ei16_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg7ei16_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg7ei16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ 
-390,7 +390,7 @@ void test_vsoxseg7ei16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg7ei16_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg7ei16_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg7ei16_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg7ei16_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg7ei16_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg7ei16_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg7ei16_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg7ei16_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg7ei16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg7ei16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg7ei16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg7ei16_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg7ei16_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei32.c index e1f04c0792b087..222464f3ce37e1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16mf4x7(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg7ei32_v_f16mf4x7(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16mf2x7(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg7ei32_v_f16mf2x7(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16m1x7(_Float16 *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg7ei32_v_f16m1x7(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f32mf2x7(float *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg7ei32_v_f32mf2x7(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f32m1x7(float *base, 
vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg7ei32_v_f32m1x7(float *base, vuint32m1_t bindex, vfloat32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f64m1x7(double *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg7ei32_v_f64m1x7(double *base, vuint32mf2_t bindex, vfloat64m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf8x7(int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg7ei32_v_i8mf8x7(int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf4x7(int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg7ei32_v_i8mf4x7(int8_t *base, vuint32m1_t bindex, vint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf2x7(int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg7ei32_v_i8mf2x7(int8_t *base, 
vuint32m2_t bindex, vint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8m1x7(int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg7ei32_v_i8m1x7(int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16mf4x7(int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg7ei32_v_i16mf4x7(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16mf2x7(int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg7ei32_v_i16mf2x7(int16_t *base, vuint32m1_t bindex, vint16mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16m1x7(int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg7ei32_v_i16m1x7(int16_t *base, vuint32m2_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32mf2x7 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i32mf2x7(int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg7ei32_v_i32mf2x7(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i32m1x7(int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg7ei32_v_i32m1x7(int32_t *base, vuint32m1_t bindex, vint32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i64m1x7(int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg7ei32_v_i64m1x7(int64_t *base, vuint32mf2_t bindex, vint64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf8x7(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg7ei32_v_u8mf8x7(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf4x7(uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg7ei32_v_u8mf4x7(uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf2x7(uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg7ei32_v_u8mf2x7(uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8m1x7(uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg7ei32_v_u8m1x7(uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16mf4x7(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg7ei32_v_u16mf4x7(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16mf2x7(uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg7ei32_v_u16mf2x7(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16m1x7(uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg7ei32_v_u16m1x7(uint16_t *base, vuint32m2_t bindex, vuint16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u32mf2x7(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg7ei32_v_u32mf2x7(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u32m1x7(uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg7ei32_v_u32m1x7(uint32_t *base, vuint32m1_t bindex, vuint32m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u64m1x7(uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg7ei32_v_u64m1x7(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg7ei32_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg7ei32_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg7ei32_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f32mf2x7_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg7ei32_v_f32mf2x7_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f32m1x7_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg7ei32_v_f32m1x7_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f64m1x7_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg7ei32_v_f64m1x7_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg7ei32_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg7ei32_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg7ei32_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg7ei32_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg7ei32_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16mf4x7_m(vbool64_t mask, int16_t *base, 
vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg7ei32_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg7ei32_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg7ei32_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg7ei32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg7ei32_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg7ei32_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg7ei32_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg7ei32_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg7ei32_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg7ei32_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg7ei32_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg7ei32_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16m1x7_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg7ei32_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg7ei32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg7ei32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, 
size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei64.c index 3c9a3dd78af4c9..795974c2ea841d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16mf4x7(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg7ei64_v_f16mf4x7(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16mf2x7(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg7ei64_v_f16mf2x7(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16m1x7(_Float16 *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg7ei64_v_f16m1x7(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f32mf2x7(float *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg7ei64_v_f32mf2x7(float *base, vuint64m1_t bindex, vfloat32mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f32m1x7(float *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg7ei64_v_f32m1x7(float *base, vuint64m2_t bindex, vfloat32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f64m1x7(double *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg7ei64_v_f64m1x7(double *base, vuint64m1_t bindex, vfloat64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8mf8x7(int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg7ei64_v_i8mf8x7(int8_t *base, vuint64m1_t bindex, vint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8mf4x7(int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg7ei64_v_i8mf4x7(int8_t *base, vuint64m2_t bindex, vint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8mf2x7(int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg7ei64_v_i8mf2x7(int8_t *base, vuint64m4_t bindex, vint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8m1x7(int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg7ei64_v_i8m1x7(int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16mf4x7(int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg7ei64_v_i16mf4x7(int16_t *base, vuint64m1_t bindex, vint16mf4x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16mf2x7(int16_t *base, vuint64m2_t 
bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg7ei64_v_i16mf2x7(int16_t *base, vuint64m2_t bindex, vint16mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16m1x7(int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg7ei64_v_i16m1x7(int16_t *base, vuint64m4_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i32mf2x7(int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg7ei64_v_i32mf2x7(int32_t *base, vuint64m1_t bindex, vint32mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i32m1x7(int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg7ei64_v_i32m1x7(int32_t *base, vuint64m2_t bindex, vint32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i64m1x7(int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg7ei64_v_i64m1x7(int64_t *base, 
vuint64m1_t bindex, vint64m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8mf8x7(uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg7ei64_v_u8mf8x7(uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8mf4x7(uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg7ei64_v_u8mf4x7(uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8mf2x7(uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg7ei64_v_u8mf2x7(uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8m1x7(uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg7ei64_v_u8m1x7(uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf4x7 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u16mf4x7(uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg7ei64_v_u16mf4x7(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u16mf2x7(uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg7ei64_v_u16mf2x7(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u16m1x7(uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg7ei64_v_u16m1x7(uint16_t *base, vuint64m4_t bindex, vuint16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u32mf2x7(uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg7ei64_v_u32mf2x7(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) 
[[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u32m1x7(uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg7ei64_v_u32m1x7(uint32_t *base, vuint64m2_t bindex, vuint32m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u64m1x7(uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg7ei64_v_u64m1x7(uint64_t *base, vuint64m1_t bindex, vuint64m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg7ei64_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg7ei64_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg7ei64_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f32mf2x7_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg7ei64_v_f32mf2x7_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f32m1x7_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg7ei64_v_f32m1x7_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f64m1x7_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 
+330,7 @@ void test_vsoxseg7ei64_v_f64m1x7_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg7ei64_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg7ei64_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg7ei64_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg7ei64_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg7ei64_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg7ei64_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg7ei64_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg7ei64_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg7ei64_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg7ei64_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg7ei64_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg7ei64_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg7ei64_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg7ei64_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg7ei64_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg7ei64_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg7ei64_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg7ei64_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg7ei64_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u32m1x7_m(vbool32_t mask, uint32_t 
*base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg7ei64_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei8.c index 5cd3d0f6997205..2f1be2a04df516 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f16mf4x7(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg7ei8_v_f16mf4x7(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f16mf2x7(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg7ei8_v_f16mf2x7(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f16m1x7(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg7ei8_v_f16m1x7(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f32mf2x7(float *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg7ei8_v_f32mf2x7(float *base, vuint8mf8_t bindex, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f32m1x7(float *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg7ei8_v_f32m1x7(float *base, vuint8mf4_t bindex, vfloat32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f64m1x7(double *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg7ei8_v_f64m1x7(double *base, vuint8mf8_t bindex, vfloat64m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8mf8x7(int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg7ei8_v_i8mf8x7(int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8mf4x7(int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg7ei8_v_i8mf4x7(int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8mf2x7(int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg7ei8_v_i8mf2x7(int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8m1x7(int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg7ei8_v_i8m1x7(int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], 
i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i16mf4x7(int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg7ei8_v_i16mf4x7(int16_t *base, vuint8mf8_t bindex, vint16mf4x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i16mf2x7(int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg7ei8_v_i16mf2x7(int16_t *base, vuint8mf4_t bindex, vint16mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i16m1x7(int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg7ei8_v_i16m1x7(int16_t *base, vuint8mf2_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i32mf2x7(int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg7ei8_v_i32mf2x7(int32_t *base, vuint8mf8_t bindex, vint32mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i32m1x7(int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, 
size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg7ei8_v_i32m1x7(int32_t *base, vuint8mf4_t bindex, vint32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i64m1x7(int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg7ei8_v_i64m1x7(int64_t *base, vuint8mf8_t bindex, vint64m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf8x7(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg7ei8_v_u8mf8x7(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf4x7(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg7ei8_v_u8mf4x7(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf2x7(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg7ei8_v_u8mf2x7(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t // CHECK-RV64-LABEL: 
define dso_local void @test_vsoxseg7ei8_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8m1x7(uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg7ei8_v_u8m1x7(uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16mf4x7(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg7ei8_v_u16mf4x7(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16mf2x7(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg7ei8_v_u16mf2x7(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16m1x7(uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg7ei8_v_u16m1x7(uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u32mf2x7(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg7ei8_v_u32mf2x7(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u32m1x7(uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg7ei8_v_u32m1x7(uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u64m1x7(uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg7ei8_v_u64m1x7(uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg7ei8_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg7ei8_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg7ei8_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f32mf2x7_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg7ei8_v_f32mf2x7_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f32m1x7_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg7ei8_v_f32m1x7_m(vbool32_t 
mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f64m1x7_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg7ei8_v_f64m1x7_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg7ei8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg7ei8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg7ei8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg7ei8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg7ei8_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg7ei8_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg7ei8_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg7ei8_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg7ei8_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg7ei8_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg7ei8_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg7ei8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg7ei8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg7ei8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg7ei8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg7ei8_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg7ei8_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg7ei8_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ 
void test_vsoxseg7ei8_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg7ei8_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei16.c index 69574cd00d7b71..f5a6292f241121 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16mf4x8(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg8ei16_v_f16mf4x8(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16mf2x8(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg8ei16_v_f16mf2x8(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16m1x8(_Float16 *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg8ei16_v_f16m1x8(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f32mf2x8(float *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg8ei16_v_f32mf2x8(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f32m1x8(float *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg8ei16_v_f32m1x8(float *base, vuint16mf2_t bindex, vfloat32m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f64m1x8(double *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg8ei16_v_f64m1x8(double *base, vuint16mf4_t bindex, vfloat64m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf8x8(int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg8ei16_v_i8mf8x8(int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf4x8(int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg8ei16_v_i8mf4x8(int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf2x8(int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg8ei16_v_i8mf2x8(int8_t *base, vuint16m1_t bindex, vint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8m1x8(int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg8ei16_v_i8m1x8(int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16mf4x8(int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg8ei16_v_i16mf4x8(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16mf2x8(int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg8ei16_v_i16mf2x8(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16m1x8(int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg8ei16_v_i16m1x8(int16_t *base, vuint16m1_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i32mf2x8(int32_t *base, 
vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg8ei16_v_i32mf2x8(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i32m1x8(int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg8ei16_v_i32m1x8(int32_t *base, vuint16mf2_t bindex, vint32m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i64m1x8(int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg8ei16_v_i64m1x8(int64_t *base, vuint16mf4_t bindex, vint64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf8x8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg8ei16_v_u8mf8x8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf4x8(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void 
test_vsoxseg8ei16_v_u8mf4x8(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf2x8(uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg8ei16_v_u8mf2x8(uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8m1x8(uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg8ei16_v_u8m1x8(uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16mf4x8(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg8ei16_v_u16mf4x8(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16mf2x8(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg8ei16_v_u16mf2x8(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg8ei16_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16m1x8(uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg8ei16_v_u16m1x8(uint16_t *base, vuint16m1_t bindex, vuint16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u32mf2x8(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg8ei16_v_u32mf2x8(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u32m1x8(uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg8ei16_v_u32m1x8(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u64m1x8(uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg8ei16_v_u64m1x8(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg8ei16_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg8ei16_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg8ei16_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f32mf2x8_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ 
void test_vsoxseg8ei16_v_f32mf2x8_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f32m1x8_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg8ei16_v_f32m1x8_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f64m1x8_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg8ei16_v_f64m1x8_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg8ei16_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg8ei16_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg8ei16_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg8ei16_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg8ei16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg8ei16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg8ei16_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg8ei16_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg8ei16_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg8ei16_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg8ei16_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg8ei16_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg8ei16_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg8ei16_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg8ei16_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg8ei16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg8ei16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16m1x8_m(vbool16_t mask, uint16_t 
*base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg8ei16_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg8ei16_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg8ei16_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei32.c index fcf212a759ddd2..36e3a232629ab9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f16mf4x8(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg8ei32_v_f16mf4x8(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f16mf2x8(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg8ei32_v_f16mf2x8(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f16m1x8(_Float16 *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg8ei32_v_f16m1x8(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f32mf2x8(float *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg8ei32_v_f32mf2x8(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f32m1x8(float *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg8ei32_v_f32m1x8(float *base, vuint32m1_t bindex, vfloat32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f64m1x8(double *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg8ei32_v_f64m1x8(double *base, vuint32mf2_t bindex, vfloat64m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8mf8x8(int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg8ei32_v_i8mf8x8(int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8mf4x8(int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg8ei32_v_i8mf4x8(int8_t *base, vuint32m1_t bindex, vint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8mf2x8(int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg8ei32_v_i8mf2x8(int8_t *base, vuint32m2_t bindex, vint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8m1x8(int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg8ei32_v_i8m1x8(int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16mf4x8(int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg8ei32_v_i16mf4x8(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16mf2x8(int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg8ei32_v_i16mf2x8(int16_t *base, vuint32m1_t bindex, vint16mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16m1x8(int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg8ei32_v_i16m1x8(int16_t *base, vuint32m2_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i32mf2x8(int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg8ei32_v_i32mf2x8(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i32m1x8(int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg8ei32_v_i32m1x8(int32_t *base, vuint32m1_t bindex, vint32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i64m1x8(int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg8ei32_v_i64m1x8(int64_t *base, vuint32mf2_t bindex, vint64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf8x8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg8ei32_v_u8mf8x8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf4x8(uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg8ei32_v_u8mf4x8(uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf2x8(uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg8ei32_v_u8mf2x8(uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8m1x8(uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg8ei32_v_u8m1x8(uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16mf4x8(uint16_t 
*base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg8ei32_v_u16mf4x8(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16mf2x8(uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg8ei32_v_u16mf2x8(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16m1x8(uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg8ei32_v_u16m1x8(uint16_t *base, vuint32m2_t bindex, vuint16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u32mf2x8(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg8ei32_v_u32mf2x8(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u32m1x8(uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void 
test_vsoxseg8ei32_v_u32m1x8(uint32_t *base, vuint32m1_t bindex, vuint32m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u64m1x8(uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg8ei32_v_u64m1x8(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg8ei32_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg8ei32_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg8ei32_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg8ei32_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f32mf2x8_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg8ei32_v_f32mf2x8_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f32m1x8_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg8ei32_v_f32m1x8_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f64m1x8_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg8ei32_v_f64m1x8_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg8ei32_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg8ei32_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg8ei32_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg8ei32_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg8ei32_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg8ei32_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg8ei32_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg8ei32_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i32m1x8_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg8ei32_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg8ei32_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg8ei32_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { 
@@ -450,7 +450,7 @@ void test_vsoxseg8ei32_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg8ei32_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg8ei32_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg8ei32_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg8ei32_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg8ei32_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg8ei32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg8ei32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei64.c index 15a824dbbfac76..d821dbc0e7e52b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16mf4x8(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg8ei64_v_f16mf4x8(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16mf2x8(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg8ei64_v_f16mf2x8(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16m1x8(_Float16 *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg8ei64_v_f16m1x8(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32mf2x8 // CHECK-RV64-SAME: 
(ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f32mf2x8(float *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg8ei64_v_f32mf2x8(float *base, vuint64m1_t bindex, vfloat32mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f32m1x8(float *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg8ei64_v_f32m1x8(float *base, vuint64m2_t bindex, vfloat32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f64m1x8(double *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg8ei64_v_f64m1x8(double *base, vuint64m1_t bindex, vfloat64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf8x8(int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg8ei64_v_i8mf8x8(int8_t *base, vuint64m1_t bindex, vint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf4x8(int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg8ei64_v_i8mf4x8(int8_t *base, vuint64m2_t bindex, vint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf2x8(int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg8ei64_v_i8mf2x8(int8_t *base, vuint64m4_t bindex, vint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8m1x8(int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg8ei64_v_i8m1x8(int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16mf4x8(int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg8ei64_v_i16mf4x8(int16_t *base, vuint64m1_t bindex, vint16mf4x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16mf2x8(int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg8ei64_v_i16mf2x8(int16_t *base, vuint64m2_t bindex, vint16mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16m1x8(int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg8ei64_v_i16m1x8(int16_t *base, vuint64m4_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i32mf2x8(int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg8ei64_v_i32mf2x8(int32_t *base, vuint64m1_t bindex, vint32mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i32m1x8(int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg8ei64_v_i32m1x8(int32_t *base, vuint64m2_t bindex, vint32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i64m1x8(int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg8ei64_v_i64m1x8(int64_t *base, vuint64m1_t bindex, vint64m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf8x8(uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg8ei64_v_u8mf8x8(uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf4x8(uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg8ei64_v_u8mf4x8(uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf2x8(uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg8ei64_v_u8mf2x8(uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8m1x8(uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg8ei64_v_u8m1x8(uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16mf4x8(uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg8ei64_v_u16mf4x8(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16mf2x8(uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg8ei64_v_u16mf2x8(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16m1x8(uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg8ei64_v_u16m1x8(uint16_t *base, vuint64m4_t bindex, vuint16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u32mf2x8(uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg8ei64_v_u32mf2x8(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u32m1x8(uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg8ei64_v_u32m1x8(uint32_t *base, vuint64m2_t bindex, vuint32m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u64m1x8(uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg8ei64_v_u64m1x8(uint64_t *base, vuint64m1_t bindex, vuint64m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg8ei64_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg8ei64_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg8ei64_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f32mf2x8_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg8ei64_v_f32mf2x8_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f32m1x8_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg8ei64_v_f32m1x8_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f64m1x8_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg8ei64_v_f64m1x8_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg8ei64_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg8ei64_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg8ei64_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8m1x8_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg8ei64_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg8ei64_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg8ei64_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) { 
@@ -400,7 +400,7 @@ void test_vsoxseg8ei64_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg8ei64_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg8ei64_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg8ei64_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg8ei64_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg8ei64_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg8ei64_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg8ei64_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg8ei64_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg8ei64_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg8ei64_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg8ei64_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg8ei64_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei8.c index 7bcd658af46945..877523fcb752e5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f16mf4x8(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg8ei8_v_f16mf4x8(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg8ei8_v_f16mf2x8(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg8ei8_v_f16mf2x8(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f16m1x8(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg8ei8_v_f16m1x8(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f32mf2x8(float *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg8ei8_v_f32mf2x8(float *base, vuint8mf8_t bindex, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f32m1x8(float *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg8ei8_v_f32m1x8(float *base, vuint8mf4_t bindex, vfloat32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f64m1x8(double *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void 
test_vsoxseg8ei8_v_f64m1x8(double *base, vuint8mf8_t bindex, vfloat64m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8mf8x8(int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg8ei8_v_i8mf8x8(int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8mf4x8(int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg8ei8_v_i8mf4x8(int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8mf2x8(int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg8ei8_v_i8mf2x8(int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8m1x8(int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg8ei8_v_i8m1x8(int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf4x8 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16mf4x8(int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg8ei8_v_i16mf4x8(int16_t *base, vuint8mf8_t bindex, vint16mf4x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16mf2x8(int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg8ei8_v_i16mf2x8(int16_t *base, vuint8mf4_t bindex, vint16mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16m1x8(int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg8ei8_v_i16m1x8(int16_t *base, vuint8mf2_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i32mf2x8(int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg8ei8_v_i32mf2x8(int32_t *base, vuint8mf8_t bindex, vint32mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i32m1x8(int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg8ei8_v_i32m1x8(int32_t *base, vuint8mf4_t bindex, vint32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i64m1x8(int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg8ei8_v_i64m1x8(int64_t *base, vuint8mf8_t bindex, vint64m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf8x8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg8ei8_v_u8mf8x8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf4x8(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg8ei8_v_u8mf4x8(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf2x8(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg8ei8_v_u8mf2x8(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8m1x8(uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg8ei8_v_u8m1x8(uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u16mf4x8(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg8ei8_v_u16mf4x8(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u16mf2x8(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg8ei8_v_u16mf2x8(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u16m1x8(uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg8ei8_v_u16m1x8(uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u32mf2x8(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg8ei8_v_u32mf2x8(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u32m1x8(uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg8ei8_v_u32m1x8(uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u64m1x8(uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg8ei8_v_u64m1x8(uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg8ei8_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg8ei8_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg8ei8_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f32mf2x8_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg8ei8_v_f32mf2x8_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f32m1x8_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg8ei8_v_f32m1x8_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f64m1x8_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg8ei8_v_f64m1x8_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg8ei8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg8ei8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg8ei8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg8ei8_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg8ei8_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void 
test_vsoxseg8ei8_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg8ei8_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg8ei8_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg8ei8_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 
6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg8ei8_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg8ei8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg8ei8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg8ei8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg8ei8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg8ei8_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg8ei8_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg8ei8_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg8ei8_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg8ei8_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei16.c index a0297b1d816a4c..44db95833e3f46 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg2ei16_v_f16mf4x2(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg2ei16_v_f16mf4x2(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16mf2x2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg2ei16_v_f16mf2x2(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m1x2(_Float16 *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg2ei16_v_f16m1x2(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m2x2(_Float16 *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg2ei16_v_f16m2x2(_Float16 *base, vuint16m2_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m4x2(_Float16 *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ 
-60,7 +60,7 @@ void test_vsuxseg2ei16_v_f16m4x2(_Float16 *base, vuint16m4_t bindex, vfloat16m4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32mf2x2(float *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg2ei16_v_f32mf2x2(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m1x2(float *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg2ei16_v_f32m1x2(float *base, vuint16mf2_t bindex, vfloat32m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m2x2(float *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg2ei16_v_f32m2x2(float *base, vuint16m1_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m4x2(float *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg2ei16_v_f32m4x2(float *base, vuint16m2_t bindex, vfloat32m4x2_t // CHECK-RV64-LABEL: 
define dso_local void @test_vsuxseg2ei16_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m1x2(double *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg2ei16_v_f64m1x2(double *base, vuint16mf4_t bindex, vfloat64m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m2x2(double *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg2ei16_v_f64m2x2(double *base, vuint16mf2_t bindex, vfloat64m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m4x2(double *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg2ei16_v_f64m4x2(double *base, vuint16m1_t bindex, vfloat64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8mf8x2(int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg2ei16_v_i8mf8x2(int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8mf4x2(int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg2ei16_v_i8mf4x2(int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8mf2x2(int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg2ei16_v_i8mf2x2(int8_t *base, vuint16m1_t bindex, vint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m1x2(int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg2ei16_v_i8m1x2(int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m2x2(int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg2ei16_v_i8m2x2(int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m4x2(int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg2ei16_v_i8m4x2(int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16mf4x2(int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg2ei16_v_i16mf4x2(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16mf2x2(int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg2ei16_v_i16mf2x2(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m1x2(int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg2ei16_v_i16m1x2(int16_t *base, vuint16m1_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m2x2(int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg2ei16_v_i16m2x2(int16_t *base, vuint16m2_t bindex, vint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m4x2(int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg2ei16_v_i16m4x2(int16_t *base, vuint16m4_t bindex, vint16m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32mf2x2(int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg2ei16_v_i32mf2x2(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32m1x2(int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg2ei16_v_i32m1x2(int32_t *base, vuint16mf2_t bindex, vint32m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32m2x2(int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg2ei16_v_i32m2x2(int32_t *base, vuint16m1_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32m4x2(int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg2ei16_v_i32m4x2(int32_t *base, vuint16m2_t bindex, vint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i64m1x2(int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg2ei16_v_i64m1x2(int64_t *base, vuint16mf4_t bindex, vint64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i64m2x2(int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg2ei16_v_i64m2x2(int64_t *base, vuint16mf2_t bindex, vint64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i64m4x2(int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg2ei16_v_i64m4x2(int64_t *base, vuint16m1_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf8x2(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg2ei16_v_u8mf8x2(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf4x2(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg2ei16_v_u8mf4x2(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf2x2(uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg2ei16_v_u8mf2x2(uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m1x2(uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg2ei16_v_u8m1x2(uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m2x2(uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg2ei16_v_u8m2x2(uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m4x2(uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg2ei16_v_u8m4x2(uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16mf4x2(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg2ei16_v_u16mf4x2(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16mf2x2(uint16_t 
*base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg2ei16_v_u16mf2x2(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m1x2(uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg2ei16_v_u16m1x2(uint16_t *base, vuint16m1_t bindex, vuint16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m2x2(uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg2ei16_v_u16m2x2(uint16_t *base, vuint16m2_t bindex, vuint16m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m4x2(uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg2ei16_v_u16m4x2(uint16_t *base, vuint16m4_t bindex, vuint16m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32mf2x2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void 
test_vsuxseg2ei16_v_u32mf2x2(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m1x2(uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg2ei16_v_u32m1x2(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m2x2(uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg2ei16_v_u32m2x2(uint32_t *base, vuint16m1_t bindex, vuint32m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m4x2(uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg2ei16_v_u32m4x2(uint32_t *base, vuint16m2_t bindex, vuint32m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u64m1x2(uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg2ei16_v_u64m1x2(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg2ei16_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u64m2x2(uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg2ei16_v_u64m2x2(uint64_t *base, vuint16mf2_t bindex, vuint64m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u64m4x2(uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg2ei16_v_u64m4x2(uint64_t *base, vuint16m1_t bindex, vuint64m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg2ei16_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg2ei16_v_f16mf2x2_m(vbool32_t 
mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg2ei16_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg2ei16_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg2ei16_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint16m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32mf2x2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg2ei16_v_f32mf2x2_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m1x2_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg2ei16_v_f32m1x2_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m2x2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg2ei16_v_f32m2x2_m(vbool16_t mask, float *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m4x2_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg2ei16_v_f32m4x2_m(vbool8_t mask, float *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m1x2_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg2ei16_v_f64m1x2_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m2x2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg2ei16_v_f64m2x2_m(vbool32_t mask, double *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m4x2_m(vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg2ei16_v_f64m4x2_m(vbool16_t mask, double *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg2ei16_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg2ei16_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg2ei16_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg2ei16_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg2ei16_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint16m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m4x2_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg2ei16_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint16m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg2ei16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg2ei16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, 
vint16m1x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg2ei16_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg2ei16_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg2ei16_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint16m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg2ei16_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg2ei16_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsuxseg2ei16_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsuxseg2ei16_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsuxseg2ei16_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsuxseg2ei16_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsuxseg2ei16_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsuxseg2ei16_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsuxseg2ei16_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf2x2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsuxseg2ei16_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -830,7 +830,7 @@ void test_vsuxseg2ei16_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -840,7 +840,7 @@ void test_vsuxseg2ei16_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) { 
@@ -850,7 +850,7 @@ void test_vsuxseg2ei16_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint16m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -860,7 +860,7 @@ void test_vsuxseg2ei16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -870,7 +870,7 @@ void test_vsuxseg2ei16_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -880,7 +880,7 @@ void test_vsuxseg2ei16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -890,7 +890,7 @@ void test_vsuxseg2ei16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -900,7 +900,7 @@ void test_vsuxseg2ei16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint16m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -910,7 +910,7 @@ void test_vsuxseg2ei16_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -920,7 +920,7 @@ void test_vsuxseg2ei16_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 
2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -930,7 +930,7 @@ void test_vsuxseg2ei16_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -940,7 +940,7 @@ void test_vsuxseg2ei16_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -950,7 +950,7 @@ void test_vsuxseg2ei16_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -960,7 +960,7 @@ void test_vsuxseg2ei16_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) 
[[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei16_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei32.c
index c23d7ea75dbecf..d99295446519cf 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei32.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf4x2
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_f16mf4x2(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
@@ -20,7 +20,7 @@ void test_vsuxseg2ei32_v_f16mf4x2(_Float16 *base, vuint32mf2_t bindex, vfloat16m
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf2x2
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_f16mf2x2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
@@ -30,7 +30,7 @@ void test_vsuxseg2ei32_v_f16mf2x2(_Float16 *base, vuint32m1_t bindex, vfloat16mf
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m1x2
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei32_v_f16m1x2(_Float16 *base, vuint32m2_t
bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg2ei32_v_f16m1x2(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16m2x2(_Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg2ei32_v_f16m2x2(_Float16 *base, vuint32m4_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16m4x2(_Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg2ei32_v_f16m4x2(_Float16 *base, vuint32m8_t bindex, vfloat16m4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32mf2x2(float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg2ei32_v_f32mf2x2(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m1x2(float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg2ei32_v_f32m1x2(float *base, 
vuint32m1_t bindex, vfloat32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m2x2(float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg2ei32_v_f32m2x2(float *base, vuint32m2_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m4x2(float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg2ei32_v_f32m4x2(float *base, vuint32m4_t bindex, vfloat32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f64m1x2(double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg2ei32_v_f64m1x2(double *base, vuint32mf2_t bindex, vfloat64m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f64m2x2(double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg2ei32_v_f64m2x2(double *base, vuint32m1_t bindex, vfloat64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m4x2 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f64m4x2(double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg2ei32_v_f64m4x2(double *base, vuint32m2_t bindex, vfloat64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf8x2(int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg2ei32_v_i8mf8x2(int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf4x2(int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg2ei32_v_i8mf4x2(int8_t *base, vuint32m1_t bindex, vint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf2x2(int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg2ei32_v_i8mf2x2(int8_t *base, vuint32m2_t bindex, vint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8m1x2(int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg2ei32_v_i8m1x2(int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8m2x2(int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg2ei32_v_i8m2x2(int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16mf4x2(int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg2ei32_v_i16mf4x2(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16mf2x2(int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg2ei32_v_i16mf2x2(int16_t *base, vuint32m1_t bindex, vint16mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m1x2(int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg2ei32_v_i16m1x2(int16_t *base, vuint32m2_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m2x2(int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg2ei32_v_i16m2x2(int16_t *base, vuint32m4_t bindex, vint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m4x2(int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg2ei32_v_i16m4x2(int16_t *base, vuint32m8_t bindex, vint16m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32mf2x2(int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg2ei32_v_i32mf2x2(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32m1x2(int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg2ei32_v_i32m1x2(int32_t *base, vuint32m1_t bindex, vint32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32m2x2(int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg2ei32_v_i32m2x2(int32_t *base, vuint32m2_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32m4x2(int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg2ei32_v_i32m4x2(int32_t *base, vuint32m4_t bindex, vint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i64m1x2(int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg2ei32_v_i64m1x2(int64_t *base, vuint32mf2_t bindex, vint64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i64m2x2(int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg2ei32_v_i64m2x2(int64_t *base, vuint32m1_t bindex, vint64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i64m4x2(int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg2ei32_v_i64m4x2(int64_t *base, vuint32m2_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf8x2(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg2ei32_v_u8mf8x2(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf4x2(uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg2ei32_v_u8mf4x2(uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf2x2(uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg2ei32_v_u8mf2x2(uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8m1x2(uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg2ei32_v_u8m1x2(uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8m2x2(uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg2ei32_v_u8m2x2(uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16mf4x2(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg2ei32_v_u16mf4x2(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16mf2x2(uint16_t 
*base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg2ei32_v_u16mf2x2(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m1x2(uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg2ei32_v_u16m1x2(uint16_t *base, vuint32m2_t bindex, vuint16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m2x2(uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg2ei32_v_u16m2x2(uint16_t *base, vuint32m4_t bindex, vuint16m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m4x2(uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg2ei32_v_u16m4x2(uint16_t *base, vuint32m8_t bindex, vuint16m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32mf2x2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void 
test_vsuxseg2ei32_v_u32mf2x2(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32m1x2(uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg2ei32_v_u32m1x2(uint32_t *base, vuint32m1_t bindex, vuint32m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32m2x2(uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg2ei32_v_u32m2x2(uint32_t *base, vuint32m2_t bindex, vuint32m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32m4x2(uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg2ei32_v_u32m4x2(uint32_t *base, vuint32m4_t bindex, vuint32m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m1x2(uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg2ei32_v_u64m1x2(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg2ei32_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m2x2(uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg2ei32_v_u64m2x2(uint64_t *base, vuint32m1_t bindex, vuint64m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m4x2(uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg2ei32_v_u64m4x2(uint64_t *base, vuint32m2_t bindex, vuint64m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg2ei32_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg2ei32_v_f16mf2x2_m(vbool32_t mask, 
_Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg2ei32_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg2ei32_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg2ei32_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint32m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32mf2x2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg2ei32_v_f32mf2x2_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m1x2_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg2ei32_v_f32m1x2_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m2x2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg2ei32_v_f32m2x2_m(vbool16_t mask, float *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m4x2_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg2ei32_v_f32m4x2_m(vbool8_t mask, float *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f64m1x2_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg2ei32_v_f64m1x2_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f64m2x2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg2ei32_v_f64m2x2_m(vbool32_t mask, double *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f64m4x2_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg2ei32_v_f64m4x2_m(vbool16_t mask, double *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg2ei32_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg2ei32_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg2ei32_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg2ei32_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg2ei32_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint32m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf4x2_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg2ei32_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg2ei32_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg2ei32_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x2_t 
v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg2ei32_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg2ei32_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint32m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg2ei32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg2ei32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", 
, 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg2ei32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg2ei32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg2ei32_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsuxseg2ei32_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsuxseg2ei32_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsuxseg2ei32_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsuxseg2ei32_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsuxseg2ei32_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsuxseg2ei32_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsuxseg2ei32_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsuxseg2ei32_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -830,7 +830,7 @@ void test_vsuxseg2ei32_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint32m1_t // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -840,7 +840,7 @@ void test_vsuxseg2ei32_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -850,7 +850,7 @@ void test_vsuxseg2ei32_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -860,7 +860,7 @@ void test_vsuxseg2ei32_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint32m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg2ei32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -870,7 +870,7 @@ void test_vsuxseg2ei32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -880,7 +880,7 @@ void test_vsuxseg2ei32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -890,7 +890,7 @@ void test_vsuxseg2ei32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -900,7 +900,7 @@ void test_vsuxseg2ei32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -910,7 +910,7 @@ void test_vsuxseg2ei32_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -920,7 +920,7 @@ void test_vsuxseg2ei32_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei64.c index b082b82093cb3e..9184d7d6f622fa 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16mf4x2(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg2ei64_v_f16mf4x2(_Float16 *base, vuint64m1_t bindex, vfloat16mf // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16mf2x2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg2ei64_v_f16mf2x2(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16m1x2(_Float16 *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg2ei64_v_f16m1x2(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16m2x2(_Float16 *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg2ei64_v_f16m2x2(_Float16 *base, vuint64m8_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32mf2x2(float *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg2ei64_v_f32mf2x2(float *base, vuint64m1_t bindex, vfloat32mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32m1x2(float *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg2ei64_v_f32m1x2(float *base, vuint64m2_t bindex, vfloat32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32m2x2(float *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg2ei64_v_f32m2x2(float *base, vuint64m4_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32m4x2(float *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg2ei64_v_f32m4x2(float *base, vuint64m8_t bindex, vfloat32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m1x2(double *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg2ei64_v_f64m1x2(double *base, vuint64m1_t bindex, vfloat64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m2x2(double *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg2ei64_v_f64m2x2(double *base, vuint64m2_t bindex, vfloat64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m4x2(double *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg2ei64_v_f64m4x2(double *base, vuint64m4_t bindex, vfloat64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf8x2(int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg2ei64_v_i8mf8x2(int8_t *base, vuint64m1_t bindex, vint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf4x2(int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg2ei64_v_i8mf4x2(int8_t *base, vuint64m2_t bindex, vint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf2x2(int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg2ei64_v_i8mf2x2(int8_t *base, vuint64m4_t bindex, vint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8m1x2(int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg2ei64_v_i8m1x2(int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16mf4x2(int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg2ei64_v_i16mf4x2(int16_t *base, vuint64m1_t bindex, vint16mf4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16mf2x2(int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg2ei64_v_i16mf2x2(int16_t *base, vuint64m2_t bindex, vint16mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16m1x2(int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg2ei64_v_i16m1x2(int16_t *base, vuint64m4_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16m2x2(int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg2ei64_v_i16m2x2(int16_t *base, vuint64m8_t bindex, vint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32mf2x2(int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg2ei64_v_i32mf2x2(int32_t *base, vuint64m1_t bindex, vint32mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m1x2(int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg2ei64_v_i32m1x2(int32_t *base, vuint64m2_t bindex, vint32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m2x2(int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg2ei64_v_i32m2x2(int32_t *base, vuint64m4_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m4x2(int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg2ei64_v_i32m4x2(int32_t *base, vuint64m8_t bindex, vint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i64m1x2(int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg2ei64_v_i64m1x2(int64_t *base, vuint64m1_t bindex, vint64m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i64m2x2(int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg2ei64_v_i64m2x2(int64_t *base, vuint64m2_t bindex, vint64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i64m4x2(int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg2ei64_v_i64m4x2(int64_t *base, vuint64m4_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf8x2(uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg2ei64_v_u8mf8x2(uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf4x2(uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg2ei64_v_u8mf4x2(uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf2x2(uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg2ei64_v_u8mf2x2(uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8m1x2(uint8_t *base, 
vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg2ei64_v_u8m1x2(uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16mf4x2(uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg2ei64_v_u16mf4x2(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16mf2x2(uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg2ei64_v_u16mf2x2(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16m1x2(uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg2ei64_v_u16m1x2(uint16_t *base, vuint64m4_t bindex, vuint16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16m2x2(uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void 
test_vsuxseg2ei64_v_u16m2x2(uint16_t *base, vuint64m8_t bindex, vuint16m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32mf2x2(uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg2ei64_v_u32mf2x2(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m1x2(uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg2ei64_v_u32m1x2(uint32_t *base, vuint64m2_t bindex, vuint32m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m2x2(uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg2ei64_v_u32m2x2(uint32_t *base, vuint64m4_t bindex, vuint32m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m4x2(uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg2ei64_v_u32m4x2(uint32_t *base, vuint64m8_t bindex, vuint32m4x2 // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg2ei64_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u64m1x2(uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg2ei64_v_u64m1x2(uint64_t *base, vuint64m1_t bindex, vuint64m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u64m2x2(uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg2ei64_v_u64m2x2(uint64_t *base, vuint64m2_t bindex, vuint64m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u64m4x2(uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg2ei64_v_u64m4x2(uint64_t *base, vuint64m4_t bindex, vuint64m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg2ei64_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg2ei64_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg2ei64_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg2ei64_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg2ei64_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32mf2x2_m(vbool64_t mask, float 
*base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg2ei64_v_f32mf2x2_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32m1x2_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg2ei64_v_f32m1x2_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32m2x2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg2ei64_v_f32m2x2_m(vbool16_t mask, float *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32m4x2_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg2ei64_v_f32m4x2_m(vbool8_t mask, float *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m1x2_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg2ei64_v_f64m1x2_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m2x2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg2ei64_v_f64m2x2_m(vbool32_t mask, double *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m4x2_m(vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg2ei64_v_f64m4x2_m(vbool16_t mask, double *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg2ei64_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg2ei64_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg2ei64_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg2ei64_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg2ei64_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf2x2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg2ei64_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg2ei64_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg2ei64_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) 
{ @@ -620,7 +620,7 @@ void test_vsuxseg2ei64_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg2ei64_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg2ei64_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg2ei64_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg2ei64_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg2ei64_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg2ei64_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg2ei64_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg2ei64_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg2ei64_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg2ei64_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg2ei64_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg2ei64_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsuxseg2ei64_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsuxseg2ei64_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsuxseg2ei64_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: 
define dso_local void @test_vsuxseg2ei64_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsuxseg2ei64_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsuxseg2ei64_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsuxseg2ei64_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg2ei64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsuxseg2ei64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsuxseg2ei64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei8.c index 6cb00b9aaaf4fd..16da657a4f9fac 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16mf4x2(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg2ei8_v_f16mf4x2(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16mf2x2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg2ei8_v_f16mf2x2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m1x2(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg2ei8_v_f16m1x2(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m2x2(_Float16 *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg2ei8_v_f16m2x2(_Float16 *base, vuint8m1_t bindex, vfloat16m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m4x2(_Float16 *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg2ei8_v_f16m4x2(_Float16 *base, vuint8m2_t bindex, vfloat16m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32mf2x2(float *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg2ei8_v_f32mf2x2(float *base, vuint8mf8_t bindex, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32m1x2(float *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg2ei8_v_f32m1x2(float *base, vuint8mf4_t bindex, vfloat32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32m2x2(float *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg2ei8_v_f32m2x2(float *base, vuint8mf2_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32m4x2(float *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg2ei8_v_f32m4x2(float *base, vuint8m1_t bindex, vfloat32m4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m1x2(double *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg2ei8_v_f64m1x2(double *base, vuint8mf8_t bindex, vfloat64m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m2x2(double *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg2ei8_v_f64m2x2(double *base, vuint8mf4_t bindex, vfloat64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m4x2(double *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg2ei8_v_f64m4x2(double *base, vuint8mf2_t bindex, vfloat64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf8x2(int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg2ei8_v_i8mf8x2(int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf4x2(int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg2ei8_v_i8mf4x2(int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf2x2(int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg2ei8_v_i8mf2x2(int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m1x2(int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg2ei8_v_i8m1x2(int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m2x2(int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg2ei8_v_i8m2x2(int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m4x2(int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg2ei8_v_i8m4x2(int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16mf4x2(int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg2ei8_v_i16mf4x2(int16_t *base, vuint8mf8_t bindex, vint16mf4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16mf2x2(int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg2ei8_v_i16mf2x2(int16_t *base, vuint8mf4_t bindex, vint16mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m1x2(int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg2ei8_v_i16m1x2(int16_t *base, vuint8mf2_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m2x2(int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, 
size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg2ei8_v_i16m2x2(int16_t *base, vuint8m1_t bindex, vint16m2x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m4x2(int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg2ei8_v_i16m4x2(int16_t *base, vuint8m2_t bindex, vint16m4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32mf2x2(int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg2ei8_v_i32mf2x2(int32_t *base, vuint8mf8_t bindex, vint32mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m1x2(int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg2ei8_v_i32m1x2(int32_t *base, vuint8mf4_t bindex, vint32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m2x2(int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg2ei8_v_i32m2x2(int32_t *base, vuint8mf2_t bindex, vint32m2x2_t // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m4x2(int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg2ei8_v_i32m4x2(int32_t *base, vuint8m1_t bindex, vint32m4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m1x2(int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg2ei8_v_i64m1x2(int64_t *base, vuint8mf8_t bindex, vint64m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m2x2(int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg2ei8_v_i64m2x2(int64_t *base, vuint8mf4_t bindex, vint64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m4x2(int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg2ei8_v_i64m4x2(int64_t *base, vuint8mf2_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf8x2(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg2ei8_v_u8mf8x2(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf4x2(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg2ei8_v_u8mf4x2(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf2x2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg2ei8_v_u8mf2x2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m1x2(uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg2ei8_v_u8m1x2(uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m2x2(uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg2ei8_v_u8m2x2(uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m4x2(uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg2ei8_v_u8m4x2(uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16mf4x2(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg2ei8_v_u16mf4x2(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16mf2x2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg2ei8_v_u16mf2x2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16m1x2(uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg2ei8_v_u16m1x2(uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16m2x2(uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg2ei8_v_u16m2x2(uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16m4x2(uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg2ei8_v_u16m4x2(uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32mf2x2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg2ei8_v_u32mf2x2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32m1x2(uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg2ei8_v_u32m1x2(uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32m2x2(uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg2ei8_v_u32m2x2(uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32m4x2(uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg2ei8_v_u32m4x2(uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u64m1x2(uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg2ei8_v_u64m1x2(uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u64m2x2(uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg2ei8_v_u64m2x2(uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u64m4x2(uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg2ei8_v_u64m4x2(uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg2ei8_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg2ei8_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg2ei8_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg2ei8_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg2ei8_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint8m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32mf2x2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg2ei8_v_f32mf2x2_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32m1x2_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg2ei8_v_f32m1x2_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32m2x2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg2ei8_v_f32m2x2_m(vbool16_t mask, float *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32m4x2_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg2ei8_v_f32m4x2_m(vbool8_t mask, float *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m1x2_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg2ei8_v_f64m1x2_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m2x2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg2ei8_v_f64m2x2_m(vbool32_t mask, double *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m4x2_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg2ei8_v_f64m4x2_m(vbool16_t mask, double *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg2ei8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void 
test_vsuxseg2ei8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg2ei8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg2ei8_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg2ei8_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) 
// CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg2ei8_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg2ei8_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg2ei8_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg2ei8_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg2ei8_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg2ei8_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint8m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg2ei8_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg2ei8_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsuxseg2ei8_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsuxseg2ei8_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsuxseg2ei8_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsuxseg2ei8_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsuxseg2ei8_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsuxseg2ei8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsuxseg2ei8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void 
test_vsuxseg2ei8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -830,7 +830,7 @@ void test_vsuxseg2ei8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -840,7 +840,7 @@ void test_vsuxseg2ei8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -850,7 +850,7 @@ void test_vsuxseg2ei8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 
4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -860,7 +860,7 @@ void test_vsuxseg2ei8_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -870,7 +870,7 @@ void test_vsuxseg2ei8_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -880,7 +880,7 @@ void test_vsuxseg2ei8_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -890,7 +890,7 @@ void test_vsuxseg2ei8_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -900,7 +900,7 @@ void test_vsuxseg2ei8_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint8m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -910,7 +910,7 @@ void test_vsuxseg2ei8_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -920,7 +920,7 @@ void test_vsuxseg2ei8_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -930,7 +930,7 @@ void test_vsuxseg2ei8_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -940,7 +940,7 @@ void test_vsuxseg2ei8_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -950,7 +950,7 @@ void test_vsuxseg2ei8_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -960,7 +960,7 @@ void test_vsuxseg2ei8_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei16.c index 66637eea967a6f..7889f09ce38b77 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16mf4x3(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg3ei16_v_f16mf4x3(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16mf2x3(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg3ei16_v_f16mf2x3(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16m1x3(_Float16 *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg3ei16_v_f16m1x3(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16m2x3(_Float16 *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg3ei16_v_f16m2x3(_Float16 *base, vuint16m2_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32mf2x3(float *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg3ei16_v_f32mf2x3(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32m1x3(float *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg3ei16_v_f32m1x3(float *base, vuint16mf2_t bindex, vfloat32m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32m2x3(float *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg3ei16_v_f32m2x3(float *base, vuint16m1_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f64m1x3(double *base, vuint16mf4_t bindex, 
vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg3ei16_v_f64m1x3(double *base, vuint16mf4_t bindex, vfloat64m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f64m2x3(double *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg3ei16_v_f64m2x3(double *base, vuint16mf2_t bindex, vfloat64m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf8x3(int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg3ei16_v_i8mf8x3(int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf4x3(int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg3ei16_v_i8mf4x3(int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf2x3(int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg3ei16_v_i8mf2x3(int8_t *base, vuint16m1_t 
bindex, vint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8m1x3(int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg3ei16_v_i8m1x3(int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8m2x3(int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg3ei16_v_i8m2x3(int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16mf4x3(int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg3ei16_v_i16mf4x3(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16mf2x3(int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg3ei16_v_i16mf2x3(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m1x3 // CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16m1x3(int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg3ei16_v_i16m1x3(int16_t *base, vuint16m1_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16m2x3(int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg3ei16_v_i16m2x3(int16_t *base, vuint16m2_t bindex, vint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i32mf2x3(int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg3ei16_v_i32mf2x3(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i32m1x3(int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg3ei16_v_i32m1x3(int32_t *base, vuint16mf2_t bindex, vint32m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i32m2x3(int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg3ei16_v_i32m2x3(int32_t *base, vuint16m1_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i64m1x3(int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg3ei16_v_i64m1x3(int64_t *base, vuint16mf4_t bindex, vint64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i64m2x3(int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg3ei16_v_i64m2x3(int64_t *base, vuint16mf2_t bindex, vint64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf8x3(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg3ei16_v_u8mf8x3(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf4x3(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg3ei16_v_u8mf4x3(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf2x3(uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg3ei16_v_u8mf2x3(uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8m1x3(uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg3ei16_v_u8m1x3(uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8m2x3(uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg3ei16_v_u8m2x3(uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16mf4x3(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg3ei16_v_u16mf4x3(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16mf2x3(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg3ei16_v_u16mf2x3(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16m1x3(uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg3ei16_v_u16m1x3(uint16_t *base, vuint16m1_t bindex, vuint16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16m2x3(uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg3ei16_v_u16m2x3(uint16_t *base, vuint16m2_t bindex, vuint16m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u32mf2x3(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg3ei16_v_u32mf2x3(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u32m1x3(uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg3ei16_v_u32m1x3(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u32m2x3(uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg3ei16_v_u32m2x3(uint32_t *base, vuint16m1_t bindex, vuint32m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u64m1x3(uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg3ei16_v_u64m1x3(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u64m2x3(uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg3ei16_v_u64m2x3(uint64_t *base, vuint16mf2_t bindex, vuint64m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg3ei16_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg3ei16_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg3ei16_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg3ei16_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32mf2x3_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg3ei16_v_f32mf2x3_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32m1x3_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg3ei16_v_f32m1x3_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32m2x3_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg3ei16_v_f32m2x3_m(vbool16_t mask, float *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f64m1x3_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg3ei16_v_f64m1x3_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f64m2x3_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg3ei16_v_f64m2x3_m(vbool32_t mask, double *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg3ei16_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg3ei16_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg3ei16_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg3ei16_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg3ei16_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint16m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg3ei16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg3ei16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg3ei16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg3ei16_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg3ei16_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg3ei16_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg3ei16_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg3ei16_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg3ei16_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg3ei16_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg3ei16_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg3ei16_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg3ei16_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m1x3_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg3ei16_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg3ei16_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg3ei16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, 
size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg3ei16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg3ei16_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg3ei16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg3ei16_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg3ei16_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg3ei16_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg3ei16_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei32.c index 35416daa7a3cdb..42663370dc798c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg3ei32_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16mf4x3(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg3ei32_v_f16mf4x3(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16mf2x3(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg3ei32_v_f16mf2x3(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16m1x3(_Float16 *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg3ei32_v_f16m1x3(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16m2x3(_Float16 *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg3ei32_v_f16m2x3(_Float16 *base, vuint32m4_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f32mf2x3(float *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg3ei32_v_f32mf2x3(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f32m1x3(float *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg3ei32_v_f32m1x3(float *base, vuint32m1_t bindex, vfloat32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f32m2x3(float *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg3ei32_v_f32m2x3(float *base, vuint32m2_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f64m1x3(double *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg3ei32_v_f64m1x3(double *base, vuint32mf2_t bindex, vfloat64m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f64m2x3(double *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg3ei32_v_f64m2x3(double *base, vuint32m1_t bindex, vfloat64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf8x3(int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg3ei32_v_i8mf8x3(int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf4x3(int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg3ei32_v_i8mf4x3(int8_t *base, vuint32m1_t bindex, vint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf2x3(int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg3ei32_v_i8mf2x3(int8_t *base, vuint32m2_t bindex, vint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8m1x3(int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg3ei32_v_i8m1x3(int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8m2x3(int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg3ei32_v_i8m2x3(int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16mf4x3(int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg3ei32_v_i16mf4x3(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16mf2x3(int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg3ei32_v_i16mf2x3(int16_t *base, vuint32m1_t bindex, vint16mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16m1x3(int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg3ei32_v_i16m1x3(int16_t *base, vuint32m2_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16m2x3(int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg3ei32_v_i16m2x3(int16_t *base, vuint32m4_t bindex, vint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32mf2x3(int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg3ei32_v_i32mf2x3(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32m1x3(int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg3ei32_v_i32m1x3(int32_t *base, vuint32m1_t bindex, vint32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32m2x3(int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg3ei32_v_i32m2x3(int32_t *base, vuint32m2_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i64m1x3(int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg3ei32_v_i64m1x3(int64_t *base, vuint32mf2_t bindex, vint64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i64m2x3(int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg3ei32_v_i64m2x3(int64_t *base, vuint32m1_t bindex, vint64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf8x3(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg3ei32_v_u8mf8x3(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf4x3(uint8_t 
*base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg3ei32_v_u8mf4x3(uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf2x3(uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg3ei32_v_u8mf2x3(uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8m1x3(uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg3ei32_v_u8m1x3(uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8m2x3(uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg3ei32_v_u8m2x3(uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16mf4x3(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void 
test_vsuxseg3ei32_v_u16mf4x3(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16mf2x3(uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg3ei32_v_u16mf2x3(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16m1x3(uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg3ei32_v_u16m1x3(uint16_t *base, vuint32m2_t bindex, vuint16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16m2x3(uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg3ei32_v_u16m2x3(uint16_t *base, vuint32m4_t bindex, vuint16m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32mf2x3(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg3ei32_v_u32mf2x3(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg3ei32_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32m1x3(uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg3ei32_v_u32m1x3(uint32_t *base, vuint32m1_t bindex, vuint32m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32m2x3(uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg3ei32_v_u32m2x3(uint32_t *base, vuint32m2_t bindex, vuint32m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u64m1x3(uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg3ei32_v_u64m1x3(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u64m2x3(uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg3ei32_v_u64m2x3(uint64_t *base, vuint32m1_t bindex, vuint64m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg3ei32_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg3ei32_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg3ei32_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void 
test_vsuxseg3ei32_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f32mf2x3_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg3ei32_v_f32mf2x3_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f32m1x3_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg3ei32_v_f32m1x3_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f32m2x3_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg3ei32_v_f32m2x3_m(vbool16_t mask, float *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f64m1x3_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg3ei32_v_f64m1x3_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f64m2x3_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg3ei32_v_f64m2x3_m(vbool32_t mask, double *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg3ei32_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg3ei32_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg3ei32_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg3ei32_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg3ei32_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint32m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg3ei32_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg3ei32_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg3ei32_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg3ei32_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg3ei32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg3ei32_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg3ei32_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg3ei32_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg3ei32_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i64m2x3_m(vbool32_t mask, int64_t *base, 
vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg3ei32_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg3ei32_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg3ei32_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg3ei32_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg3ei32_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg3ei32_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg3ei32_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg3ei32_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg3ei32_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg3ei32_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg3ei32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg3ei32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m2x3_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg3ei32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg3ei32_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei64.c index b706a9666d0d1a..5619f8729d2c39 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16mf4x3(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg3ei64_v_f16mf4x3(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16mf2x3(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg3ei64_v_f16mf2x3(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16m1x3(_Float16 *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg3ei64_v_f16m1x3(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16m2x3(_Float16 *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg3ei64_v_f16m2x3(_Float16 *base, vuint64m8_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f32mf2x3(float *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg3ei64_v_f32mf2x3(float *base, vuint64m1_t bindex, vfloat32mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f32m1x3(float *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg3ei64_v_f32m1x3(float *base, vuint64m2_t bindex, vfloat32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f32m2x3(float *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg3ei64_v_f32m2x3(float *base, vuint64m4_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f64m1x3(double *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg3ei64_v_f64m1x3(double *base, vuint64m1_t bindex, vfloat64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f64m2x3(double *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg3ei64_v_f64m2x3(double *base, vuint64m2_t bindex, vfloat64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf8x3(int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg3ei64_v_i8mf8x3(int8_t *base, vuint64m1_t bindex, vint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf4x3(int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg3ei64_v_i8mf4x3(int8_t *base, vuint64m2_t bindex, vint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf2x3(int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg3ei64_v_i8mf2x3(int8_t *base, vuint64m4_t bindex, vint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8m1x3(int8_t *base, vuint64m8_t bindex, 
vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg3ei64_v_i8m1x3(int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16mf4x3(int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg3ei64_v_i16mf4x3(int16_t *base, vuint64m1_t bindex, vint16mf4x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16mf2x3(int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg3ei64_v_i16mf2x3(int16_t *base, vuint64m2_t bindex, vint16mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16m1x3(int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg3ei64_v_i16m1x3(int16_t *base, vuint64m4_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16m2x3(int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg3ei64_v_i16m2x3(int16_t *base, vuint64m8_t 
bindex, vint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32mf2x3(int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg3ei64_v_i32mf2x3(int32_t *base, vuint64m1_t bindex, vint32mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32m1x3(int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg3ei64_v_i32m1x3(int32_t *base, vuint64m2_t bindex, vint32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32m2x3(int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg3ei64_v_i32m2x3(int32_t *base, vuint64m4_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i64m1x3(int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg3ei64_v_i64m1x3(int64_t *base, vuint64m1_t bindex, vint64m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m2x3 // CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i64m2x3(int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg3ei64_v_i64m2x3(int64_t *base, vuint64m2_t bindex, vint64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf8x3(uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg3ei64_v_u8mf8x3(uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf4x3(uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg3ei64_v_u8mf4x3(uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf2x3(uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg3ei64_v_u8mf2x3(uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8m1x3(uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg3ei64_v_u8m1x3(uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16mf4x3(uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg3ei64_v_u16mf4x3(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16mf2x3(uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg3ei64_v_u16mf2x3(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16m1x3(uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg3ei64_v_u16m1x3(uint16_t *base, vuint64m4_t bindex, vuint16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16m2x3(uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg3ei64_v_u16m2x3(uint16_t *base, vuint64m8_t bindex, vuint16m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32mf2x3(uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg3ei64_v_u32mf2x3(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32m1x3(uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg3ei64_v_u32m1x3(uint32_t *base, vuint64m2_t bindex, vuint32m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32m2x3(uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg3ei64_v_u32m2x3(uint32_t *base, vuint64m4_t bindex, vuint32m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u64m1x3(uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg3ei64_v_u64m1x3(uint64_t *base, vuint64m1_t bindex, vuint64m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u64m2x3(uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg3ei64_v_u64m2x3(uint64_t *base, vuint64m2_t bindex, vuint64m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg3ei64_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg3ei64_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg3ei64_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg3ei64_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f32mf2x3_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg3ei64_v_f32mf2x3_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f32m1x3_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg3ei64_v_f32m1x3_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m2x3_m // CHECK-RV64-SAME: 
( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f32m2x3_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg3ei64_v_f32m2x3_m(vbool16_t mask, float *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f64m1x3_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg3ei64_v_f64m1x3_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f64m2x3_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg3ei64_v_f64m2x3_m(vbool32_t mask, double *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t 
vl) { @@ -460,7 +460,7 @@ void test_vsuxseg3ei64_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg3ei64_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg3ei64_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg3ei64_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg3ei64_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg3ei64_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg3ei64_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg3ei64_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg3ei64_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg3ei64_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg3ei64_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg3ei64_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg3ei64_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg3ei64_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg3ei64_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg3ei64_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg3ei64_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg3ei64_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg3ei64_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg3ei64_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16m1x3_m(vbool16_t mask, 
uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg3ei64_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg3ei64_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg3ei64_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg3ei64_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg3ei64_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg3ei64_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei8.c index 8d025c252fc382..ea0534365ddc35 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16mf4x3(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg3ei8_v_f16mf4x3(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg3ei8_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16mf2x3(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg3ei8_v_f16mf2x3(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16m1x3(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg3ei8_v_f16m1x3(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16m2x3(_Float16 *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg3ei8_v_f16m2x3(_Float16 *base, vuint8m1_t bindex, vfloat16m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f32mf2x3(float *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg3ei8_v_f32mf2x3(float *base, vuint8mf8_t bindex, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f32m1x3(float *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg3ei8_v_f32m1x3(float *base, vuint8mf4_t bindex, vfloat32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f32m2x3(float *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg3ei8_v_f32m2x3(float *base, vuint8mf2_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f64m1x3(double *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg3ei8_v_f64m1x3(double *base, vuint8mf8_t bindex, vfloat64m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f64m2x3(double *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg3ei8_v_f64m2x3(double *base, vuint8mf4_t bindex, vfloat64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf8x3(int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg3ei8_v_i8mf8x3(int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf4x3(int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg3ei8_v_i8mf4x3(int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf2x3(int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg3ei8_v_i8mf2x3(int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8m1x3(int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg3ei8_v_i8m1x3(int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8m2x3(int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg3ei8_v_i8m2x3(int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16mf4x3(int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg3ei8_v_i16mf4x3(int16_t *base, vuint8mf8_t bindex, vint16mf4x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16mf2x3(int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg3ei8_v_i16mf2x3(int16_t *base, vuint8mf4_t bindex, vint16mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16m1x3(int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg3ei8_v_i16m1x3(int16_t *base, vuint8mf2_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16m2x3(int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg3ei8_v_i16m2x3(int16_t *base, vuint8m1_t bindex, vint16m2x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32mf2x3(int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg3ei8_v_i32mf2x3(int32_t *base, vuint8mf8_t bindex, vint32mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32m1x3(int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg3ei8_v_i32m1x3(int32_t *base, vuint8mf4_t bindex, vint32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32m2x3(int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg3ei8_v_i32m2x3(int32_t *base, vuint8mf2_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i64m1x3(int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg3ei8_v_i64m1x3(int64_t *base, vuint8mf8_t bindex, vint64m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i64m2x3(int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg3ei8_v_i64m2x3(int64_t *base, vuint8mf4_t bindex, vint64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8mf8x3(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg3ei8_v_u8mf8x3(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8mf4x3(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg3ei8_v_u8mf4x3(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8mf2x3(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t 
v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg3ei8_v_u8mf2x3(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8m1x3(uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg3ei8_v_u8m1x3(uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8m2x3(uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg3ei8_v_u8m2x3(uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16mf4x3(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg3ei8_v_u16mf4x3(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16mf2x3(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg3ei8_v_u16mf2x3(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16m1x3(uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg3ei8_v_u16m1x3(uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16m2x3(uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg3ei8_v_u16m2x3(uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32mf2x3(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg3ei8_v_u32mf2x3(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32m1x3(uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg3ei8_v_u32m1x3(uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32m2x3(uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg3ei8_v_u32m2x3(uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u64m1x3(uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg3ei8_v_u64m1x3(uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u64m2x3(uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg3ei8_v_u64m2x3(uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg3ei8_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg3ei8_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg3ei8_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg3ei8_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f32mf2x3_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void 
test_vsuxseg3ei8_v_f32mf2x3_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f32m1x3_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg3ei8_v_f32m1x3_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f32m2x3_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg3ei8_v_f32m2x3_m(vbool16_t mask, float *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f64m1x3_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg3ei8_v_f64m1x3_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], 
i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f64m2x3_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg3ei8_v_f64m2x3_m(vbool32_t mask, double *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg3ei8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg3ei8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg3ei8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg3ei8_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg3ei8_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg3ei8_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg3ei8_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg3ei8_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg3ei8_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg3ei8_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg3ei8_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg3ei8_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg3ei8_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg3ei8_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void 
test_vsuxseg3ei8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg3ei8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg3ei8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg3ei8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 
3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg3ei8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg3ei8_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg3ei8_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg3ei8_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg3ei8_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg3ei8_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg3ei8_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg3ei8_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg3ei8_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei16.c index ced4e074aefca1..f0156c8bc5ef77 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16mf4x4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg4ei16_v_f16mf4x4(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16mf2x4(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x4_t 
v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg4ei16_v_f16mf2x4(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16m1x4(_Float16 *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg4ei16_v_f16m1x4(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16m2x4(_Float16 *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg4ei16_v_f16m2x4(_Float16 *base, vuint16m2_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f32mf2x4(float *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg4ei16_v_f32mf2x4(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f32m1x4(float *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg4ei16_v_f32m1x4(float *base, vuint16mf2_t bindex, 
vfloat32m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f32m2x4(float *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg4ei16_v_f32m2x4(float *base, vuint16m1_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f64m1x4(double *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg4ei16_v_f64m1x4(double *base, vuint16mf4_t bindex, vfloat64m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f64m2x4(double *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg4ei16_v_f64m2x4(double *base, vuint16mf2_t bindex, vfloat64m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8mf8x4(int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg4ei16_v_i8mf8x4(int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8mf4x4(int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg4ei16_v_i8mf4x4(int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8mf2x4(int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg4ei16_v_i8mf2x4(int8_t *base, vuint16m1_t bindex, vint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8m1x4(int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg4ei16_v_i8m1x4(int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8m2x4(int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg4ei16_v_i8m2x4(int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16mf4x4(int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg4ei16_v_i16mf4x4(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16mf2x4(int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg4ei16_v_i16mf2x4(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16m1x4(int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg4ei16_v_i16m1x4(int16_t *base, vuint16m1_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16m2x4(int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg4ei16_v_i16m2x4(int16_t *base, vuint16m2_t bindex, vint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32mf2x4(int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg4ei16_v_i32mf2x4(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32m1x4(int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg4ei16_v_i32m1x4(int32_t *base, vuint16mf2_t bindex, vint32m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32m2x4(int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg4ei16_v_i32m2x4(int32_t *base, vuint16m1_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i64m1x4(int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg4ei16_v_i64m1x4(int64_t *base, vuint16mf4_t bindex, vint64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i64m2x4(int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg4ei16_v_i64m2x4(int64_t *base, vuint16mf2_t bindex, vint64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf8x4(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg4ei16_v_u8mf8x4(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf4x4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg4ei16_v_u8mf4x4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf2x4(uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg4ei16_v_u8mf2x4(uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8m1x4(uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg4ei16_v_u8m1x4(uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8m2x4(uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg4ei16_v_u8m2x4(uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16mf4x4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg4ei16_v_u16mf4x4(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16mf2x4(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg4ei16_v_u16mf2x4(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16m1x4(uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg4ei16_v_u16m1x4(uint16_t *base, vuint16m1_t bindex, vuint16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16m2x4(uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg4ei16_v_u16m2x4(uint16_t *base, vuint16m2_t bindex, vuint16m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u32mf2x4(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg4ei16_v_u32mf2x4(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u32m1x4(uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg4ei16_v_u32m1x4(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg4ei16_v_u32m2x4(uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg4ei16_v_u32m2x4(uint32_t *base, vuint16m1_t bindex, vuint32m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u64m1x4(uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg4ei16_v_u64m1x4(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u64m2x4(uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg4ei16_v_u64m2x4(uint64_t *base, vuint16mf2_t bindex, vuint64m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg4ei16_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) 
// CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg4ei16_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg4ei16_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg4ei16_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f32mf2x4_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg4ei16_v_f32mf2x4_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 
0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f32m1x4_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg4ei16_v_f32m1x4_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f32m2x4_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg4ei16_v_f32m2x4_m(vbool16_t mask, float *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f64m1x4_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg4ei16_v_f64m1x4_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f64m2x4_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg4ei16_v_f64m2x4_m(vbool32_t mask, double *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg4ei16_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg4ei16_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg4ei16_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg4ei16_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m2x4_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg4ei16_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint16m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg4ei16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg4ei16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, 
vint16m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg4ei16_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg4ei16_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg4ei16_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg4ei16_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg4ei16_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg4ei16_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg4ei16_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg4ei16_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg4ei16_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg4ei16_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg4ei16_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg4ei16_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf4x4_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg4ei16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg4ei16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg4ei16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, 
size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg4ei16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg4ei16_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg4ei16_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg4ei16_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg4ei16_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei32.c index 5bcab9cf1a7af9..4441ccedaa3dab 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16mf4x4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg4ei32_v_f16mf4x4(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16mf2x4(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg4ei32_v_f16mf2x4(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16m1x4(_Float16 *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg4ei32_v_f16m1x4(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16m2x4(_Float16 *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg4ei32_v_f16m2x4(_Float16 *base, vuint32m4_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32mf2x4(float *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg4ei32_v_f32mf2x4(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32m1x4(float *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg4ei32_v_f32m1x4(float *base, vuint32m1_t bindex, vfloat32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32m2x4(float *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg4ei32_v_f32m2x4(float *base, vuint32m2_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f64m1x4(double *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg4ei32_v_f64m1x4(double *base, vuint32mf2_t bindex, vfloat64m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f64m2x4(double *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg4ei32_v_f64m2x4(double *base, vuint32m1_t bindex, vfloat64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf8x4(int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg4ei32_v_i8mf8x4(int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf4x4(int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg4ei32_v_i8mf4x4(int8_t *base, vuint32m1_t bindex, vint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf2x4(int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg4ei32_v_i8mf2x4(int8_t *base, vuint32m2_t bindex, vint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8m1x4(int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg4ei32_v_i8m1x4(int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8m2x4(int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg4ei32_v_i8m2x4(int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16mf4x4(int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg4ei32_v_i16mf4x4(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16mf2x4(int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg4ei32_v_i16mf2x4(int16_t *base, vuint32m1_t bindex, vint16mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16m1x4(int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg4ei32_v_i16m1x4(int16_t *base, vuint32m2_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16m2x4(int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg4ei32_v_i16m2x4(int16_t *base, vuint32m4_t bindex, vint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32mf2x4(int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg4ei32_v_i32mf2x4(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32m1x4(int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg4ei32_v_i32m1x4(int32_t *base, vuint32m1_t bindex, vint32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32m2x4(int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg4ei32_v_i32m2x4(int32_t *base, vuint32m2_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i64m1x4(int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg4ei32_v_i64m1x4(int64_t *base, vuint32mf2_t bindex, vint64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i64m2x4(int64_t 
*base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg4ei32_v_i64m2x4(int64_t *base, vuint32m1_t bindex, vint64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf8x4(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg4ei32_v_u8mf8x4(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf4x4(uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg4ei32_v_u8mf4x4(uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf2x4(uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg4ei32_v_u8mf2x4(uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8m1x4(uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void 
test_vsuxseg4ei32_v_u8m1x4(uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8m2x4(uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg4ei32_v_u8m2x4(uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16mf4x4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg4ei32_v_u16mf4x4(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16mf2x4(uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg4ei32_v_u16mf2x4(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16m1x4(uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg4ei32_v_u16m1x4(uint16_t *base, vuint32m2_t bindex, vuint16m1x4 // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg4ei32_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16m2x4(uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg4ei32_v_u16m2x4(uint16_t *base, vuint32m4_t bindex, vuint16m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u32mf2x4(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg4ei32_v_u32mf2x4(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u32m1x4(uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg4ei32_v_u32m1x4(uint32_t *base, vuint32m1_t bindex, vuint32m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u32m2x4(uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg4ei32_v_u32m2x4(uint32_t *base, vuint32m2_t bindex, vuint32m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u64m1x4(uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg4ei32_v_u64m1x4(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u64m2x4(uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg4ei32_v_u64m2x4(uint64_t *base, vuint32m1_t bindex, vuint64m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg4ei32_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg4ei32_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m1x4_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg4ei32_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg4ei32_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32mf2x4_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg4ei32_v_f32mf2x4_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32m1x4_m(vbool32_t mask, float *base, vuint32m1_t bindex, 
vfloat32m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg4ei32_v_f32m1x4_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32m2x4_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg4ei32_v_f32m2x4_m(vbool16_t mask, float *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f64m1x4_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg4ei32_v_f64m1x4_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f64m2x4_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg4ei32_v_f64m2x4_m(vbool32_t mask, double *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg4ei32_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg4ei32_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg4ei32_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg4ei32_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg4ei32_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint32m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg4ei32_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg4ei32_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg4ei32_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m2x4_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg4ei32_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg4ei32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg4ei32_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) 
{ @@ -590,7 +590,7 @@ void test_vsuxseg4ei32_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg4ei32_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg4ei32_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg4ei32_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg4ei32_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg4ei32_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg4ei32_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg4ei32_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg4ei32_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg4ei32_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg4ei32_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg4ei32_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg4ei32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg4ei32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg4ei32_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg4ei32_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint32mf2_t // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei64.c index e39b3bd2d6ae30..8ebcfb23623f09 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16mf4x4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg4ei64_v_f16mf4x4(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16mf2x4(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg4ei64_v_f16mf2x4(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16m1x4(_Float16 *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg4ei64_v_f16m1x4(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16m2x4(_Float16 *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg4ei64_v_f16m2x4(_Float16 *base, vuint64m8_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f32mf2x4(float *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg4ei64_v_f32mf2x4(float *base, vuint64m1_t bindex, vfloat32mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f32m1x4(float *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg4ei64_v_f32m1x4(float *base, vuint64m2_t bindex, vfloat32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f32m2x4(float *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg4ei64_v_f32m2x4(float *base, vuint64m4_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f64m1x4(double *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg4ei64_v_f64m1x4(double *base, vuint64m1_t bindex, vfloat64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f64m2x4(double *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg4ei64_v_f64m2x4(double *base, vuint64m2_t bindex, vfloat64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8mf8x4(int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg4ei64_v_i8mf8x4(int8_t *base, vuint64m1_t bindex, vint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8mf4x4(int8_t *base, 
vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg4ei64_v_i8mf4x4(int8_t *base, vuint64m2_t bindex, vint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8mf2x4(int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg4ei64_v_i8mf2x4(int8_t *base, vuint64m4_t bindex, vint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8m1x4(int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg4ei64_v_i8m1x4(int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16mf4x4(int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg4ei64_v_i16mf4x4(int16_t *base, vuint64m1_t bindex, vint16mf4x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16mf2x4(int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg4ei64_v_i16mf2x4(int16_t *base, 
vuint64m2_t bindex, vint16mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16m1x4(int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg4ei64_v_i16m1x4(int16_t *base, vuint64m4_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16m2x4(int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg4ei64_v_i16m2x4(int16_t *base, vuint64m8_t bindex, vint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i32mf2x4(int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg4ei64_v_i32mf2x4(int32_t *base, vuint64m1_t bindex, vint32mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i32m1x4(int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg4ei64_v_i32m1x4(int32_t *base, vuint64m2_t bindex, vint32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m2x4 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i32m2x4(int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg4ei64_v_i32m2x4(int32_t *base, vuint64m4_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i64m1x4(int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg4ei64_v_i64m1x4(int64_t *base, vuint64m1_t bindex, vint64m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i64m2x4(int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg4ei64_v_i64m2x4(int64_t *base, vuint64m2_t bindex, vint64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8mf8x4(uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg4ei64_v_u8mf8x4(uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8mf4x4(uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg4ei64_v_u8mf4x4(uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8mf2x4(uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg4ei64_v_u8mf2x4(uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8m1x4(uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg4ei64_v_u8m1x4(uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16mf4x4(uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg4ei64_v_u16mf4x4(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16mf2x4(uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg4ei64_v_u16mf2x4(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16m1x4(uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg4ei64_v_u16m1x4(uint16_t *base, vuint64m4_t bindex, vuint16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16m2x4(uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg4ei64_v_u16m2x4(uint16_t *base, vuint64m8_t bindex, vuint16m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32mf2x4(uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg4ei64_v_u32mf2x4(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32m1x4(uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg4ei64_v_u32m1x4(uint32_t *base, vuint64m2_t bindex, vuint32m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32m2x4(uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg4ei64_v_u32m2x4(uint32_t *base, vuint64m4_t bindex, vuint32m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u64m1x4(uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg4ei64_v_u64m1x4(uint64_t *base, vuint64m1_t bindex, vuint64m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u64m2x4(uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg4ei64_v_u64m2x4(uint64_t *base, vuint64m2_t bindex, vuint64m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg4ei64_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg4ei64_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg4ei64_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg4ei64_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f32mf2x4_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg4ei64_v_f32mf2x4_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f32m1x4_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg4ei64_v_f32m1x4_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f32m2x4_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg4ei64_v_f32m2x4_m(vbool16_t mask, float *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f64m1x4_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg4ei64_v_f64m1x4_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg4ei64_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f64m2x4_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg4ei64_v_f64m2x4_m(vbool32_t mask, double *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg4ei64_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg4ei64_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8mf2x4_m(vbool16_t mask, int8_t *base, 
vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg4ei64_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg4ei64_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg4ei64_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg4ei64_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg4ei64_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg4ei64_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg4ei64_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg4ei64_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg4ei64_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg4ei64_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg4ei64_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg4ei64_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf4x4_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg4ei64_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg4ei64_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg4ei64_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { 
@@ -630,7 +630,7 @@ void test_vsuxseg4ei64_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg4ei64_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg4ei64_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg4ei64_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg4ei64_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg4ei64_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg4ei64_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg4ei64_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei8.c index c732a4b430a842..6caee3c0e4f691 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16mf4x4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg4ei8_v_f16mf4x4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16mf2x4(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg4ei8_v_f16mf2x4(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16m1x4(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg4ei8_v_f16m1x4(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16m2x4(_Float16 *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg4ei8_v_f16m2x4(_Float16 *base, vuint8m1_t bindex, vfloat16m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32mf2x4(float *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg4ei8_v_f32mf2x4(float *base, vuint8mf8_t bindex, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32m1x4(float *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg4ei8_v_f32m1x4(float *base, vuint8mf4_t bindex, vfloat32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32m2x4(float *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg4ei8_v_f32m2x4(float *base, vuint8mf2_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f64m1x4(double *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg4ei8_v_f64m1x4(double *base, vuint8mf8_t bindex, vfloat64m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f64m2x4(double *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg4ei8_v_f64m2x4(double *base, vuint8mf4_t bindex, vfloat64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf8x4(int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg4ei8_v_i8mf8x4(int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf4x4(int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg4ei8_v_i8mf4x4(int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf2x4(int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg4ei8_v_i8mf2x4(int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8m1x4(int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg4ei8_v_i8m1x4(int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8m2x4(int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg4ei8_v_i8m2x4(int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16mf4x4(int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg4ei8_v_i16mf4x4(int16_t *base, vuint8mf8_t bindex, vint16mf4x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16mf2x4(int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg4ei8_v_i16mf2x4(int16_t *base, vuint8mf4_t bindex, vint16mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16m1x4(int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg4ei8_v_i16m1x4(int16_t *base, vuint8mf2_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16m2x4(int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg4ei8_v_i16m2x4(int16_t *base, vuint8m1_t bindex, vint16m2x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i32mf2x4(int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg4ei8_v_i32mf2x4(int32_t *base, vuint8mf8_t bindex, vint32mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i32m1x4(int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg4ei8_v_i32m1x4(int32_t *base, vuint8mf4_t bindex, vint32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i32m2x4(int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg4ei8_v_i32m2x4(int32_t *base, vuint8mf2_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i64m1x4(int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg4ei8_v_i64m1x4(int64_t *base, vuint8mf8_t bindex, vint64m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i64m2x4(int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg4ei8_v_i64m2x4(int64_t *base, vuint8mf4_t bindex, vint64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8mf8x4(uint8_t *base, vuint8mf8_t bindex, 
vuint8mf8x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg4ei8_v_u8mf8x4(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8mf4x4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg4ei8_v_u8mf4x4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8mf2x4(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg4ei8_v_u8mf2x4(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8m1x4(uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg4ei8_v_u8m1x4(uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8m2x4(uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg4ei8_v_u8m2x4(uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_ 
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16mf4x4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg4ei8_v_u16mf4x4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16mf2x4(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg4ei8_v_u16mf2x4(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16m1x4(uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg4ei8_v_u16m1x4(uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16m2x4(uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg4ei8_v_u16m2x4(uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u32mf2x4(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg4ei8_v_u32mf2x4(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u32m1x4(uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg4ei8_v_u32m1x4(uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u32m2x4(uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg4ei8_v_u32m2x4(uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u64m1x4(uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg4ei8_v_u64m1x4(uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u64m2x4(uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg4ei8_v_u64m2x4(uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg4ei8_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg4ei8_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg4ei8_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg4ei8_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32mf2x4_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg4ei8_v_f32mf2x4_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32m1x4_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg4ei8_v_f32m1x4_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32m2x4_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void 
test_vsuxseg4ei8_v_f32m2x4_m(vbool16_t mask, float *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f64m1x4_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg4ei8_v_f64m1x4_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f64m2x4_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg4ei8_v_f64m2x4_m(vbool32_t mask, double *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg4ei8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 
3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg4ei8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg4ei8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg4ei8_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg4ei8_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg4ei8_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg4ei8_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg4ei8_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg4ei8_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg4ei8_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg4ei8_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg4ei8_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg4ei8_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg4ei8_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg4ei8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg4ei8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void 
test_vsuxseg4ei8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg4ei8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg4ei8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg4ei8_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], 
i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg4ei8_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg4ei8_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg4ei8_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg4ei8_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg4ei8_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg4ei8_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg4ei8_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei16.c index ef11cf64bd53b8..5c7395f1a2a094 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei16.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f16mf4x5(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg5ei16_v_f16mf4x5(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f16mf2x5(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg5ei16_v_f16mf2x5(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f16m1x5(_Float16 *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg5ei16_v_f16m1x5(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f32mf2x5(float *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg5ei16_v_f32mf2x5(float *base, vuint16mf4_t bindex, 
vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f32m1x5(float *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg5ei16_v_f32m1x5(float *base, vuint16mf2_t bindex, vfloat32m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f64m1x5(double *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg5ei16_v_f64m1x5(double *base, vuint16mf4_t bindex, vfloat64m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf8x5(int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg5ei16_v_i8mf8x5(int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf4x5(int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg5ei16_v_i8mf4x5(int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf2x5(int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg5ei16_v_i8mf2x5(int8_t *base, vuint16m1_t bindex, vint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8m1x5(int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg5ei16_v_i8m1x5(int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16mf4x5(int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg5ei16_v_i16mf4x5(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16mf2x5(int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg5ei16_v_i16mf2x5(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16m1x5(int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg5ei16_v_i16m1x5(int16_t *base, vuint16m1_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i32mf2x5(int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg5ei16_v_i32mf2x5(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i32m1x5(int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg5ei16_v_i32m1x5(int32_t *base, vuint16mf2_t bindex, vint32m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i64m1x5(int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg5ei16_v_i64m1x5(int64_t *base, vuint16mf4_t bindex, vint64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf8x5(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg5ei16_v_u8mf8x5(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf4x5(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg5ei16_v_u8mf4x5(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf2x5(uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg5ei16_v_u8mf2x5(uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8m1x5(uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg5ei16_v_u8m1x5(uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16mf4x5(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg5ei16_v_u16mf4x5(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16mf2x5(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg5ei16_v_u16mf2x5(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16m1x5(uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg5ei16_v_u16m1x5(uint16_t *base, vuint16m1_t bindex, vuint16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u32mf2x5(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg5ei16_v_u32mf2x5(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u32m1x5(uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg5ei16_v_u32m1x5(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u64m1x5(uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg5ei16_v_u64m1x5(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg5ei16_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg5ei16_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg5ei16_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f32mf2x5_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg5ei16_v_f32mf2x5_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f32m1x5_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg5ei16_v_f32m1x5_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f64m1x5_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg5ei16_v_f64m1x5_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg5ei16_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg5ei16_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg5ei16_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg5ei16_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg5ei16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg5ei16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg5ei16_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg5ei16_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg5ei16_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg5ei16_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg5ei16_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg5ei16_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg5ei16_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg5ei16_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg5ei16_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg5ei16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg5ei16_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg5ei16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg5ei16_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg5ei16_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u64m1x5_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei32.c index cac07a1aa1ca7f..00e47165601384 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16mf4x5(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg5ei32_v_f16mf4x5(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16mf2x5(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg5ei32_v_f16mf2x5(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16m1x5(_Float16 *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg5ei32_v_f16m1x5(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f32mf2x5(float *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg5ei32_v_f32mf2x5(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f32m1x5(float *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg5ei32_v_f32m1x5(float *base, vuint32m1_t bindex, vfloat32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f64m1x5(double *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg5ei32_v_f64m1x5(double *base, vuint32mf2_t bindex, vfloat64m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf8x5(int8_t *base, vuint32mf2_t bindex, 
vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg5ei32_v_i8mf8x5(int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf4x5(int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg5ei32_v_i8mf4x5(int8_t *base, vuint32m1_t bindex, vint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf2x5(int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg5ei32_v_i8mf2x5(int8_t *base, vuint32m2_t bindex, vint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8m1x5(int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg5ei32_v_i8m1x5(int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16mf4x5(int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg5ei32_v_i16mf4x5(int16_t *base, vuint32mf2_t bindex, 
vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16mf2x5(int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg5ei32_v_i16mf2x5(int16_t *base, vuint32m1_t bindex, vint16mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16m1x5(int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg5ei32_v_i16m1x5(int16_t *base, vuint32m2_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i32mf2x5(int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg5ei32_v_i32mf2x5(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i32m1x5(int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg5ei32_v_i32m1x5(int32_t *base, vuint32m1_t bindex, vint32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i64m1x5(int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg5ei32_v_i64m1x5(int64_t *base, vuint32mf2_t bindex, vint64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8mf8x5(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg5ei32_v_u8mf8x5(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8mf4x5(uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg5ei32_v_u8mf4x5(uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8mf2x5(uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg5ei32_v_u8mf2x5(uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8m1x5(uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg5ei32_v_u8m1x5(uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u16mf4x5(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg5ei32_v_u16mf4x5(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u16mf2x5(uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg5ei32_v_u16mf2x5(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u16m1x5(uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg5ei32_v_u16m1x5(uint16_t *base, vuint32m2_t bindex, vuint16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u32mf2x5(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg5ei32_v_u32mf2x5(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u32m1x5(uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg5ei32_v_u32m1x5(uint32_t *base, vuint32m1_t bindex, vuint32m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u64m1x5(uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg5ei32_v_u64m1x5(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg5ei32_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg5ei32_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg5ei32_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f32mf2x5_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg5ei32_v_f32mf2x5_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f32m1x5_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg5ei32_v_f32m1x5_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f64m1x5_m // CHECK-RV64-SAME: 
( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f64m1x5_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg5ei32_v_f64m1x5_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg5ei32_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg5ei32_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { 
@@ -360,7 +360,7 @@ void test_vsuxseg5ei32_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg5ei32_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg5ei32_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg5ei32_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg5ei32_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg5ei32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg5ei32_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg5ei32_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg5ei32_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg5ei32_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg5ei32_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg5ei32_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg5ei32_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg5ei32_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg5ei32_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg5ei32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg5ei32_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg5ei32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei64.c index 55a6103d0a8e1e..d8b909d7f1bfd1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16mf4x5(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg5ei64_v_f16mf4x5(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16mf2x5(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg5ei64_v_f16mf2x5(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16m1x5(_Float16 *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg5ei64_v_f16m1x5(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f32mf2x5(float *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg5ei64_v_f32mf2x5(float *base, vuint64m1_t bindex, vfloat32mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f32m1x5(float *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg5ei64_v_f32m1x5(float *base, vuint64m2_t bindex, vfloat32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f64m1x5(double *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg5ei64_v_f64m1x5(double *base, vuint64m1_t bindex, vfloat64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8mf8x5(int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg5ei64_v_i8mf8x5(int8_t *base, vuint64m1_t bindex, vint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8mf4x5(int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg5ei64_v_i8mf4x5(int8_t *base, vuint64m2_t bindex, vint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8mf2x5(int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg5ei64_v_i8mf2x5(int8_t *base, vuint64m4_t bindex, vint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8m1x5(int8_t *base, vuint64m8_t 
bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg5ei64_v_i8m1x5(int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i16mf4x5(int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg5ei64_v_i16mf4x5(int16_t *base, vuint64m1_t bindex, vint16mf4x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i16mf2x5(int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg5ei64_v_i16mf2x5(int16_t *base, vuint64m2_t bindex, vint16mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i16m1x5(int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg5ei64_v_i16m1x5(int16_t *base, vuint64m4_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i32mf2x5(int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg5ei64_v_i32mf2x5(int32_t *base, 
vuint64m1_t bindex, vint32mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i32m1x5(int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg5ei64_v_i32m1x5(int32_t *base, vuint64m2_t bindex, vint32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i64m1x5(int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg5ei64_v_i64m1x5(int64_t *base, vuint64m1_t bindex, vint64m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf8x5(uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg5ei64_v_u8mf8x5(uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf4x5(uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg5ei64_v_u8mf4x5(uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf2x5 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf2x5(uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg5ei64_v_u8mf2x5(uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8m1x5(uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg5ei64_v_u8m1x5(uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16mf4x5(uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg5ei64_v_u16mf4x5(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16mf2x5(uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg5ei64_v_u16mf2x5(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16m1x5(uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg5ei64_v_u16m1x5(uint16_t *base, vuint64m4_t bindex, vuint16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u32mf2x5(uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg5ei64_v_u32mf2x5(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u32m1x5(uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg5ei64_v_u32m1x5(uint32_t *base, vuint64m2_t bindex, vuint32m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u64m1x5(uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg5ei64_v_u64m1x5(uint64_t *base, vuint64m1_t bindex, vuint64m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg5ei64_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg5ei64_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg5ei64_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f32mf2x5_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg5ei64_v_f32mf2x5_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32m1x5_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f32m1x5_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg5ei64_v_f32m1x5_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f64m1x5_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg5ei64_v_f64m1x5_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg5ei64_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x5_t 
v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg5ei64_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg5ei64_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg5ei64_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg5ei64_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg5ei64_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg5ei64_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg5ei64_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg5ei64_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg5ei64_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg5ei64_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg5ei64_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg5ei64_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg5ei64_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg5ei64_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg5ei64_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg5ei64_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg5ei64_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg5ei64_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei8.c index 4090505cddf5f4..459888d28e2127 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f16mf4x5(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg5ei8_v_f16mf4x5(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f16mf2x5(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg5ei8_v_f16mf2x5(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f16m1x5(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg5ei8_v_f16m1x5(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f32mf2x5(float *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg5ei8_v_f32mf2x5(float *base, vuint8mf8_t bindex, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f32m1x5(float *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg5ei8_v_f32m1x5(float *base, vuint8mf4_t bindex, vfloat32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f64m1x5(double *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg5ei8_v_f64m1x5(double *base, vuint8mf8_t bindex, vfloat64m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf8x5(int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg5ei8_v_i8mf8x5(int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf4x5(int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg5ei8_v_i8mf4x5(int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], 
i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf2x5(int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg5ei8_v_i8mf2x5(int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8m1x5(int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg5ei8_v_i8m1x5(int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16mf4x5(int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg5ei8_v_i16mf4x5(int16_t *base, vuint8mf8_t bindex, vint16mf4x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16mf2x5(int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg5ei8_v_i16mf2x5(int16_t *base, vuint8mf4_t bindex, vint16mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16m1x5(int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) 
{ @@ -140,7 +140,7 @@ void test_vsuxseg5ei8_v_i16m1x5(int16_t *base, vuint8mf2_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i32mf2x5(int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg5ei8_v_i32mf2x5(int32_t *base, vuint8mf8_t bindex, vint32mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i32m1x5(int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg5ei8_v_i32m1x5(int32_t *base, vuint8mf4_t bindex, vint32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i64m1x5(int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg5ei8_v_i64m1x5(int64_t *base, vuint8mf8_t bindex, vint64m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf8x5(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg5ei8_v_u8mf8x5(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg5ei8_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf4x5(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg5ei8_v_u8mf4x5(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf2x5(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg5ei8_v_u8mf2x5(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8m1x5(uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg5ei8_v_u8m1x5(uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u16mf4x5(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg5ei8_v_u16mf4x5(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u16mf2x5(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg5ei8_v_u16mf2x5(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u16m1x5(uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg5ei8_v_u16m1x5(uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u32mf2x5(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg5ei8_v_u32mf2x5(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u32m1x5(uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg5ei8_v_u32m1x5(uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u64m1x5(uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg5ei8_v_u64m1x5(uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg5ei8_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg5ei8_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg5ei8_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f32mf2x5_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg5ei8_v_f32mf2x5_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f32m1x5_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg5ei8_v_f32m1x5_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f64m1x5_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg5ei8_v_f64m1x5_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void 
test_vsuxseg5ei8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg5ei8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg5ei8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg5ei8_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg5ei8_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg5ei8_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg5ei8_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg5ei8_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg5ei8_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg5ei8_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg5ei8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg5ei8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg5ei8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg5ei8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg5ei8_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg5ei8_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg5ei8_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg5ei8_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg5ei8_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) { diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei16.c index c25494c9f1e7a4..3a332accd3361b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f16mf4x6(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg6ei16_v_f16mf4x6(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f16mf2x6(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg6ei16_v_f16mf2x6(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f16m1x6(_Float16 *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg6ei16_v_f16m1x6(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f32mf2x6(float *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg6ei16_v_f32mf2x6(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f32m1x6(float *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg6ei16_v_f32m1x6(float *base, vuint16mf2_t bindex, vfloat32m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f64m1x6(double *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg6ei16_v_f64m1x6(double *base, vuint16mf4_t bindex, vfloat64m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf8x6(int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg6ei16_v_i8mf8x6(int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf4x6(int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg6ei16_v_i8mf4x6(int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf2x6(int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg6ei16_v_i8mf2x6(int8_t *base, vuint16m1_t bindex, vint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8m1x6(int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg6ei16_v_i8m1x6(int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16mf4x6(int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg6ei16_v_i16mf4x6(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16mf2x6(int16_t *base, vuint16mf2_t 
bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg6ei16_v_i16mf2x6(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16m1x6(int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg6ei16_v_i16m1x6(int16_t *base, vuint16m1_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i32mf2x6(int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg6ei16_v_i32mf2x6(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i32m1x6(int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg6ei16_v_i32m1x6(int32_t *base, vuint16mf2_t bindex, vint32m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i64m1x6(int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg6ei16_v_i64m1x6(int64_t *base, 
vuint16mf4_t bindex, vint64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf8x6(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg6ei16_v_u8mf8x6(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf4x6(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg6ei16_v_u8mf4x6(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf2x6(uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg6ei16_v_u8mf2x6(uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8m1x6(uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg6ei16_v_u8m1x6(uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf4x6 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16mf4x6(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg6ei16_v_u16mf4x6(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16mf2x6(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg6ei16_v_u16mf2x6(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16m1x6(uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg6ei16_v_u16m1x6(uint16_t *base, vuint16m1_t bindex, vuint16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u32mf2x6(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg6ei16_v_u32mf2x6(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) 
[[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u32m1x6(uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg6ei16_v_u32m1x6(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u64m1x6(uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg6ei16_v_u64m1x6(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg6ei16_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg6ei16_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg6ei16_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f32mf2x6_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg6ei16_v_f32mf2x6_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f32m1x6_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg6ei16_v_f32m1x6_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f64m1x6_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 
+330,7 @@ void test_vsuxseg6ei16_v_f64m1x6_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg6ei16_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg6ei16_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg6ei16_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg6ei16_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg6ei16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg6ei16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg6ei16_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg6ei16_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg6ei16_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg6ei16_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg6ei16_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg6ei16_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg6ei16_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg6ei16_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg6ei16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg6ei16_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg6ei16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg6ei16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg6ei16_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u32m1x6_m(vbool32_t mask, 
uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg6ei16_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei32.c index 12aa848ea6b3b7..a47f9ba19eda48 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16mf4x6(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg6ei32_v_f16mf4x6(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16mf2x6(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg6ei32_v_f16mf2x6(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16m1x6(_Float16 *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg6ei32_v_f16m1x6(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f32mf2x6(float *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg6ei32_v_f32mf2x6(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f32m1x6(float *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg6ei32_v_f32m1x6(float *base, vuint32m1_t bindex, vfloat32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f64m1x6(double *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg6ei32_v_f64m1x6(double *base, vuint32mf2_t bindex, vfloat64m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8mf8x6(int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg6ei32_v_i8mf8x6(int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8mf4x6(int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg6ei32_v_i8mf4x6(int8_t *base, vuint32m1_t bindex, vint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8mf2x6(int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg6ei32_v_i8mf2x6(int8_t *base, vuint32m2_t bindex, vint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8m1x6(int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg6ei32_v_i8m1x6(int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16mf4x6(int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg6ei32_v_i16mf4x6(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16mf2x6(int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg6ei32_v_i16mf2x6(int16_t *base, vuint32m1_t bindex, vint16mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16m1x6(int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg6ei32_v_i16m1x6(int16_t *base, vuint32m2_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i32mf2x6(int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg6ei32_v_i32mf2x6(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i32m1x6(int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg6ei32_v_i32m1x6(int32_t *base, vuint32m1_t bindex, vint32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i64m1x6(int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg6ei32_v_i64m1x6(int64_t *base, vuint32mf2_t bindex, vint64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf8x6(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg6ei32_v_u8mf8x6(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf4x6(uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg6ei32_v_u8mf4x6(uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf2x6(uint8_t 
*base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg6ei32_v_u8mf2x6(uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8m1x6(uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg6ei32_v_u8m1x6(uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16mf4x6(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg6ei32_v_u16mf4x6(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16mf2x6(uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg6ei32_v_u16mf2x6(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16m1x6(uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void 
test_vsuxseg6ei32_v_u16m1x6(uint16_t *base, vuint32m2_t bindex, vuint16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u32mf2x6(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg6ei32_v_u32mf2x6(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u32m1x6(uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg6ei32_v_u32m1x6(uint32_t *base, vuint32m1_t bindex, vuint32m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u64m1x6(uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg6ei32_v_u64m1x6(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void 
test_vsuxseg6ei32_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg6ei32_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg6ei32_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f32mf2x6_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg6ei32_v_f32mf2x6_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f32m1x6_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg6ei32_v_f32m1x6_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f64m1x6_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg6ei32_v_f64m1x6_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg6ei32_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg6ei32_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg6ei32_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg6ei32_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg6ei32_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg6ei32_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg6ei32_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg6ei32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg6ei32_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg6ei32_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg6ei32_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg6ei32_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg6ei32_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg6ei32_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8m1x6_m(vbool8_t mask, uint8_t *base, 
vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg6ei32_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg6ei32_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg6ei32_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg6ei32_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg6ei32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg6ei32_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei64.c index 10ef73cb3e9da7..d94fac600dea82 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f16mf4x6(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg6ei64_v_f16mf4x6(_Float16 *base, vuint64m1_t bindex, vfloat16mf // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f16mf2x6(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg6ei64_v_f16mf2x6(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f16m1x6(_Float16 *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg6ei64_v_f16m1x6(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f32mf2x6(float *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg6ei64_v_f32mf2x6(float *base, vuint64m1_t bindex, vfloat32mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f32m1x6(float *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg6ei64_v_f32m1x6(float *base, vuint64m2_t bindex, vfloat32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f64m1x6(double *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg6ei64_v_f64m1x6(double *base, vuint64m1_t bindex, vfloat64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf8x6(int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg6ei64_v_i8mf8x6(int8_t *base, vuint64m1_t bindex, vint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf4x6(int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg6ei64_v_i8mf4x6(int8_t *base, vuint64m2_t bindex, vint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf2x6(int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg6ei64_v_i8mf2x6(int8_t *base, vuint64m4_t bindex, vint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8m1x6(int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg6ei64_v_i8m1x6(int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16mf4x6(int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg6ei64_v_i16mf4x6(int16_t *base, vuint64m1_t bindex, vint16mf4x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16mf2x6(int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg6ei64_v_i16mf2x6(int16_t *base, vuint64m2_t bindex, vint16mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16m1x6(int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg6ei64_v_i16m1x6(int16_t *base, vuint64m4_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", 
, 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i32mf2x6(int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg6ei64_v_i32mf2x6(int32_t *base, vuint64m1_t bindex, vint32mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i32m1x6(int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg6ei64_v_i32m1x6(int32_t *base, vuint64m2_t bindex, vint32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i64m1x6(int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg6ei64_v_i64m1x6(int64_t *base, vuint64m1_t bindex, vint64m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf8x6(uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg6ei64_v_u8mf8x6(uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf4x6(uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg6ei64_v_u8mf4x6(uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf2x6(uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg6ei64_v_u8mf2x6(uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8m1x6(uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg6ei64_v_u8m1x6(uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16mf4x6(uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg6ei64_v_u16mf4x6(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16mf2x6(uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg6ei64_v_u16mf2x6(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16m1x6(uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg6ei64_v_u16m1x6(uint16_t *base, vuint64m4_t bindex, vuint16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u32mf2x6(uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg6ei64_v_u32mf2x6(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u32m1x6(uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg6ei64_v_u32m1x6(uint32_t *base, vuint64m2_t bindex, vuint32m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg6ei64_v_u64m1x6(uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg6ei64_v_u64m1x6(uint64_t *base, vuint64m1_t bindex, vuint64m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg6ei64_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg6ei64_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg6ei64_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f32mf2x6_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg6ei64_v_f32mf2x6_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f32m1x6_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg6ei64_v_f32m1x6_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f64m1x6_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg6ei64_v_f64m1x6_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg6ei64_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg6ei64_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg6ei64_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg6ei64_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg6ei64_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf2x6_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg6ei64_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg6ei64_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg6ei64_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) 
{ @@ -420,7 +420,7 @@ void test_vsuxseg6ei64_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg6ei64_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg6ei64_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg6ei64_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg6ei64_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg6ei64_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg6ei64_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg6ei64_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg6ei64_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg6ei64_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg6ei64_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei8.c index 452a66e3183cda..e525ed065fc059 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16mf4x6(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg6ei8_v_f16mf4x6(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16mf2x6(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg6ei8_v_f16mf2x6(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16m1x6(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg6ei8_v_f16m1x6(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f32mf2x6(float *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 
@@ void test_vsuxseg6ei8_v_f32mf2x6(float *base, vuint8mf8_t bindex, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f32m1x6(float *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg6ei8_v_f32m1x6(float *base, vuint8mf4_t bindex, vfloat32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f64m1x6(double *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg6ei8_v_f64m1x6(double *base, vuint8mf8_t bindex, vfloat64m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8mf8x6(int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg6ei8_v_i8mf8x6(int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8mf4x6(int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg6ei8_v_i8mf4x6(int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg6ei8_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8mf2x6(int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg6ei8_v_i8mf2x6(int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8m1x6(int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg6ei8_v_i8m1x6(int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i16mf4x6(int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg6ei8_v_i16mf4x6(int16_t *base, vuint8mf8_t bindex, vint16mf4x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i16mf2x6(int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg6ei8_v_i16mf2x6(int16_t *base, vuint8mf4_t bindex, vint16mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) 
[[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i16m1x6(int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg6ei8_v_i16m1x6(int16_t *base, vuint8mf2_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i32mf2x6(int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg6ei8_v_i32mf2x6(int32_t *base, vuint8mf8_t bindex, vint32mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i32m1x6(int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg6ei8_v_i32m1x6(int32_t *base, vuint8mf4_t bindex, vint32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i64m1x6(int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg6ei8_v_i64m1x6(int64_t *base, vuint8mf8_t bindex, vint64m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf8x6(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg6ei8_v_u8mf8x6(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf4x6(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg6ei8_v_u8mf4x6(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf2x6(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg6ei8_v_u8mf2x6(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8m1x6(uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg6ei8_v_u8m1x6(uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16mf4x6(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg6ei8_v_u16mf4x6(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16mf2x6(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg6ei8_v_u16mf2x6(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16m1x6(uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg6ei8_v_u16m1x6(uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u32mf2x6(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg6ei8_v_u32mf2x6(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u32m1x6(uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg6ei8_v_u32m1x6(uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u64m1x6(uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg6ei8_v_u64m1x6(uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg6ei8_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg6ei8_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg6ei8_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f32mf2x6_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg6ei8_v_f32mf2x6_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f32m1x6_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg6ei8_v_f32m1x6_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f64m1x6_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg6ei8_v_f64m1x6_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg6ei8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg6ei8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg6ei8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg6ei8_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg6ei8_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg6ei8_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg6ei8_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg6ei8_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint8mf8_t 
bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg6ei8_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg6ei8_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg6ei8_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg6ei8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 
6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg6ei8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg6ei8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg6ei8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg6ei8_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg6ei8_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg6ei8_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg6ei8_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg6ei8_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei16.c index ca9cb0a8bc2131..d4f6f78da61256 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f16mf4x7(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg7ei16_v_f16mf4x7(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f16mf2x7(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg7ei16_v_f16mf2x7(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f16m1x7(_Float16 *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { 
@@ -40,7 +40,7 @@ void test_vsuxseg7ei16_v_f16m1x7(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f32mf2x7(float *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg7ei16_v_f32mf2x7(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f32m1x7(float *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg7ei16_v_f32m1x7(float *base, vuint16mf2_t bindex, vfloat32m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f64m1x7(double *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg7ei16_v_f64m1x7(double *base, vuint16mf4_t bindex, vfloat64m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf8x7(int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg7ei16_v_i8mf8x7(int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t // CHECK-RV64-LABEL: 
define dso_local void @test_vsuxseg7ei16_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf4x7(int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg7ei16_v_i8mf4x7(int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf2x7(int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg7ei16_v_i8mf2x7(int8_t *base, vuint16m1_t bindex, vint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8m1x7(int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg7ei16_v_i8m1x7(int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i16mf4x7(int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg7ei16_v_i16mf4x7(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i16mf2x7(int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg7ei16_v_i16mf2x7(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i16m1x7(int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg7ei16_v_i16m1x7(int16_t *base, vuint16m1_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i32mf2x7(int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg7ei16_v_i32mf2x7(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i32m1x7(int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg7ei16_v_i32m1x7(int32_t *base, vuint16mf2_t bindex, vint32m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i64m1x7(int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg7ei16_v_i64m1x7(int64_t *base, vuint16mf4_t bindex, vint64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8mf8x7(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg7ei16_v_u8mf8x7(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8mf4x7(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg7ei16_v_u8mf4x7(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8mf2x7(uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg7ei16_v_u8mf2x7(uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8m1x7(uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg7ei16_v_u8m1x7(uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16mf4x7(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg7ei16_v_u16mf4x7(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16mf2x7(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg7ei16_v_u16mf2x7(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16m1x7(uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg7ei16_v_u16m1x7(uint16_t *base, vuint16m1_t bindex, vuint16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u32mf2x7(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg7ei16_v_u32mf2x7(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u32m1x7(uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg7ei16_v_u32m1x7(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u64m1x7(uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg7ei16_v_u64m1x7(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg7ei16_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg7ei16_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg7ei16_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f32mf2x7_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg7ei16_v_f32mf2x7_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f32m1x7_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg7ei16_v_f32m1x7_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) 
[[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f64m1x7_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg7ei16_v_f64m1x7_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg7ei16_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg7ei16_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg7ei16_v_i8mf2x7_m(vbool16_t mask, int8_t *base, 
vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg7ei16_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg7ei16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg7ei16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg7ei16_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg7ei16_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg7ei16_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg7ei16_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg7ei16_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg7ei16_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg7ei16_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg7ei16_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg7ei16_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg7ei16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg7ei16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg7ei16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg7ei16_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32m1x7_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg7ei16_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei32.c index 36ed09a6f4fb43..94a88711e94c63 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16mf4x7(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg7ei32_v_f16mf4x7(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16mf2x7(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg7ei32_v_f16mf2x7(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16m1x7(_Float16 *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg7ei32_v_f16m1x7(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f32mf2x7(float *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg7ei32_v_f32mf2x7(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f32m1x7(float *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg7ei32_v_f32m1x7(float *base, vuint32m1_t bindex, vfloat32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f64m1x7(double *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg7ei32_v_f64m1x7(double *base, vuint32mf2_t bindex, vfloat64m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8mf8x7(int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg7ei32_v_i8mf8x7(int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8mf4x7(int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg7ei32_v_i8mf4x7(int8_t *base, vuint32m1_t bindex, vint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8mf2x7(int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg7ei32_v_i8mf2x7(int8_t *base, vuint32m2_t bindex, vint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8m1x7(int8_t *base, vuint32m4_t 
bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg7ei32_v_i8m1x7(int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16mf4x7(int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg7ei32_v_i16mf4x7(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16mf2x7(int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg7ei32_v_i16mf2x7(int16_t *base, vuint32m1_t bindex, vint16mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16m1x7(int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg7ei32_v_i16m1x7(int16_t *base, vuint32m2_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i32mf2x7(int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg7ei32_v_i32mf2x7(int32_t *base, 
vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i32m1x7(int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg7ei32_v_i32m1x7(int32_t *base, vuint32m1_t bindex, vint32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i64m1x7(int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg7ei32_v_i64m1x7(int64_t *base, vuint32mf2_t bindex, vint64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf8x7(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg7ei32_v_u8mf8x7(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf4x7(uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg7ei32_v_u8mf4x7(uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf2x7 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf2x7(uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg7ei32_v_u8mf2x7(uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8m1x7(uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg7ei32_v_u8m1x7(uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16mf4x7(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg7ei32_v_u16mf4x7(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16mf2x7(uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg7ei32_v_u16mf2x7(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16m1x7(uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg7ei32_v_u16m1x7(uint16_t *base, vuint32m2_t bindex, vuint16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u32mf2x7(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg7ei32_v_u32mf2x7(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u32m1x7(uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg7ei32_v_u32m1x7(uint32_t *base, vuint32m1_t bindex, vuint32m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u64m1x7(uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg7ei32_v_u64m1x7(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg7ei32_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg7ei32_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg7ei32_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f32mf2x7_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg7ei32_v_f32mf2x7_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f32m1x7_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f32m1x7_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg7ei32_v_f32m1x7_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f64m1x7_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg7ei32_v_f64m1x7_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg7ei32_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x7_t 
v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg7ei32_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg7ei32_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg7ei32_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg7ei32_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg7ei32_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg7ei32_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg7ei32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg7ei32_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg7ei32_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg7ei32_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg7ei32_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg7ei32_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg7ei32_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg7ei32_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg7ei32_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg7ei32_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg7ei32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg7ei32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei64.c index 4c0d5582df4622..b0c379840b020c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 
7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16mf4x7(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg7ei64_v_f16mf4x7(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16mf2x7(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg7ei64_v_f16mf2x7(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16m1x7(_Float16 *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg7ei64_v_f16m1x7(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f32mf2x7(float *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg7ei64_v_f32mf2x7(float *base, vuint64m1_t bindex, vfloat32mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f32m1x7(float *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg7ei64_v_f32m1x7(float *base, vuint64m2_t bindex, vfloat32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f64m1x7(double *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg7ei64_v_f64m1x7(double *base, vuint64m1_t bindex, vfloat64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf8x7(int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg7ei64_v_i8mf8x7(int8_t *base, vuint64m1_t bindex, vint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf4x7(int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg7ei64_v_i8mf4x7(int8_t *base, vuint64m2_t bindex, vint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf2x7(int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg7ei64_v_i8mf2x7(int8_t *base, vuint64m4_t bindex, vint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8m1x7(int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg7ei64_v_i8m1x7(int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i16mf4x7(int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg7ei64_v_i16mf4x7(int16_t *base, vuint64m1_t bindex, vint16mf4x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i16mf2x7(int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg7ei64_v_i16mf2x7(int16_t *base, vuint64m2_t bindex, vint16mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i16m1x7(int16_t *base, vuint64m4_t 
bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg7ei64_v_i16m1x7(int16_t *base, vuint64m4_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i32mf2x7(int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg7ei64_v_i32mf2x7(int32_t *base, vuint64m1_t bindex, vint32mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i32m1x7(int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg7ei64_v_i32m1x7(int32_t *base, vuint64m2_t bindex, vint32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i64m1x7(int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg7ei64_v_i64m1x7(int64_t *base, vuint64m1_t bindex, vint64m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf8x7(uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg7ei64_v_u8mf8x7(uint8_t *base, 
vuint64m1_t bindex, vuint8mf8x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf4x7(uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg7ei64_v_u8mf4x7(uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf2x7(uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg7ei64_v_u8mf2x7(uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8m1x7(uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg7ei64_v_u8m1x7(uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16mf4x7(uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg7ei64_v_u16mf4x7(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf2x7 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16mf2x7(uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg7ei64_v_u16mf2x7(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16m1x7(uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg7ei64_v_u16m1x7(uint16_t *base, vuint64m4_t bindex, vuint16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u32mf2x7(uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg7ei64_v_u32mf2x7(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u32m1x7(uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg7ei64_v_u32m1x7(uint32_t *base, vuint64m2_t bindex, vuint32m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) 
[[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u64m1x7(uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg7ei64_v_u64m1x7(uint64_t *base, vuint64m1_t bindex, vuint64m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg7ei64_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg7ei64_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg7ei64_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local 
void @test_vsuxseg7ei64_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f32mf2x7_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg7ei64_v_f32mf2x7_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f32m1x7_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg7ei64_v_f32m1x7_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f64m1x7_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg7ei64_v_f64m1x7_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf8x7_m(vbool64_t mask, int8_t 
*base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg7ei64_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg7ei64_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg7ei64_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg7ei64_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg7ei64_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg7ei64_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg7ei64_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg7ei64_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg7ei64_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg7ei64_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg7ei64_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg7ei64_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf2x7_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg7ei64_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg7ei64_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg7ei64_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t 
vl) { @@ -490,7 +490,7 @@ void test_vsuxseg7ei64_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg7ei64_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg7ei64_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg7ei64_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei8.c index 16f6f577a62a9a..1492a10ed7c8c3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16mf4x7(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg7ei8_v_f16mf4x7(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16mf2x7(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg7ei8_v_f16mf2x7(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16m1x7(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg7ei8_v_f16m1x7(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f32mf2x7(float *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg7ei8_v_f32mf2x7(float *base, vuint8mf8_t bindex, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f32m1x7(float *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg7ei8_v_f32m1x7(float *base, vuint8mf4_t bindex, vfloat32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f64m1x7(double *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg7ei8_v_f64m1x7(double *base, vuint8mf8_t bindex, vfloat64m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf8x7(int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg7ei8_v_i8mf8x7(int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf4x7(int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg7ei8_v_i8mf4x7(int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf2x7(int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg7ei8_v_i8mf2x7(int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8m1x7(int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg7ei8_v_i8m1x7(int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16mf4x7(int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg7ei8_v_i16mf4x7(int16_t *base, vuint8mf8_t bindex, vint16mf4x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16mf2x7(int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg7ei8_v_i16mf2x7(int16_t *base, vuint8mf4_t bindex, vint16mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16m1x7(int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg7ei8_v_i16m1x7(int16_t *base, vuint8mf2_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i32mf2x7(int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg7ei8_v_i32mf2x7(int32_t *base, vuint8mf8_t bindex, vint32mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i32m1x7(int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg7ei8_v_i32m1x7(int32_t *base, vuint8mf4_t bindex, vint32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i64m1x7(int64_t 
*base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg7ei8_v_i64m1x7(int64_t *base, vuint8mf8_t bindex, vint64m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8mf8x7(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg7ei8_v_u8mf8x7(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8mf4x7(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg7ei8_v_u8mf4x7(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8mf2x7(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg7ei8_v_u8mf2x7(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8m1x7(uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg7ei8_v_u8m1x7(uint8_t *base, vuint8m1_t 
bindex, vuint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16mf4x7(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg7ei8_v_u16mf4x7(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16mf2x7(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg7ei8_v_u16mf2x7(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16m1x7(uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg7ei8_v_u16m1x7(uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u32mf2x7(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg7ei8_v_u32mf2x7(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32m1x7 // CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u32m1x7(uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg7ei8_v_u32m1x7(uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u64m1x7(uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg7ei8_v_u64m1x7(uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg7ei8_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg7ei8_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg7ei8_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg7ei8_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f32mf2x7_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg7ei8_v_f32mf2x7_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f32m1x7_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg7ei8_v_f32m1x7_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f64m1x7_m(vbool64_t mask, double *base, vuint8mf8_t 
bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg7ei8_v_f64m1x7_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg7ei8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg7ei8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg7ei8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg7ei8_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg7ei8_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg7ei8_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg7ei8_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg7ei8_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg7ei8_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg7ei8_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg7ei8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg7ei8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg7ei8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg7ei8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg7ei8_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg7ei8_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg7ei8_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg7ei8_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg7ei8_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u32m1x7_m(vbool32_t mask, uint32_t *base, 
vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg7ei8_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei16.c index a2026d9daf0210..c3d66c2b3a0a03 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16mf4x8(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg8ei16_v_f16mf4x8(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16mf2x8(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg8ei16_v_f16mf2x8(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16m1x8(_Float16 *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg8ei16_v_f16m1x8(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f32mf2x8(float *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg8ei16_v_f32mf2x8(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f32m1x8(float *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg8ei16_v_f32m1x8(float *base, vuint16mf2_t bindex, vfloat32m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f64m1x8(double *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg8ei16_v_f64m1x8(double *base, vuint16mf4_t bindex, vfloat64m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf8x8(int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg8ei16_v_i8mf8x8(int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf4x8(int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg8ei16_v_i8mf4x8(int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf2x8(int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg8ei16_v_i8mf2x8(int8_t *base, vuint16m1_t bindex, vint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8m1x8(int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg8ei16_v_i8m1x8(int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i16mf4x8(int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg8ei16_v_i16mf4x8(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i16mf2x8(int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg8ei16_v_i16mf2x8(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i16m1x8(int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg8ei16_v_i16m1x8(int16_t *base, vuint16m1_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i32mf2x8(int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg8ei16_v_i32mf2x8(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i32m1x8(int32_t *base, 
vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg8ei16_v_i32m1x8(int32_t *base, vuint16mf2_t bindex, vint32m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i64m1x8(int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg8ei16_v_i64m1x8(int64_t *base, vuint16mf4_t bindex, vint64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf8x8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg8ei16_v_u8mf8x8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf4x8(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg8ei16_v_u8mf4x8(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf2x8(uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg8ei16_v_u8mf2x8(uint8_t 
*base, vuint16m1_t bindex, vuint8mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8m1x8(uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg8ei16_v_u8m1x8(uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16mf4x8(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg8ei16_v_u16mf4x8(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16mf2x8(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg8ei16_v_u16mf2x8(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16m1x8(uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg8ei16_v_u16m1x8(uint16_t *base, vuint16m1_t bindex, vuint16m1x8 // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg8ei16_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u32mf2x8(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg8ei16_v_u32mf2x8(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u32m1x8(uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg8ei16_v_u32m1x8(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u64m1x8(uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg8ei16_v_u64m1x8(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg8ei16_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg8ei16_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg8ei16_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg8ei16_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f32mf2x8_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg8ei16_v_f32mf2x8_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f32m1x8_m(vbool32_t mask, float 
*base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg8ei16_v_f32m1x8_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f64m1x8_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg8ei16_v_f64m1x8_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg8ei16_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg8ei16_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg8ei16_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg8ei16_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg8ei16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg8ei16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg8ei16_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg8ei16_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg8ei16_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg8ei16_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf8x8_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg8ei16_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg8ei16_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg8ei16_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) { 
@@ -470,7 +470,7 @@ void test_vsuxseg8ei16_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg8ei16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg8ei16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg8ei16_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg8ei16_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg8ei16_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei32.c index 4eba496ef338eb..88bcc546b1f2ae 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16mf4x8(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg8ei32_v_f16mf4x8(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16mf2x8(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg8ei32_v_f16mf2x8(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16m1x8(_Float16 *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg8ei32_v_f16m1x8(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f32mf2x8(float *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg8ei32_v_f32mf2x8(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f32m1x8(float *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg8ei32_v_f32m1x8(float *base, vuint32m1_t bindex, vfloat32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f64m1x8(double *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg8ei32_v_f64m1x8(double *base, vuint32mf2_t bindex, vfloat64m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf8x8(int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg8ei32_v_i8mf8x8(int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf4x8(int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg8ei32_v_i8mf4x8(int8_t *base, vuint32m1_t bindex, vint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf2x8(int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg8ei32_v_i8mf2x8(int8_t *base, vuint32m2_t bindex, vint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8m1x8(int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg8ei32_v_i8m1x8(int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16mf4x8(int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg8ei32_v_i16mf4x8(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16mf2x8(int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg8ei32_v_i16mf2x8(int16_t *base, vuint32m1_t bindex, vint16mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16m1x8(int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg8ei32_v_i16m1x8(int16_t *base, vuint32m2_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i32mf2x8(int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg8ei32_v_i32mf2x8(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i32m1x8(int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg8ei32_v_i32m1x8(int32_t *base, vuint32m1_t bindex, vint32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i64m1x8(int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg8ei32_v_i64m1x8(int64_t *base, vuint32mf2_t bindex, vint64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf8x8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg8ei32_v_u8mf8x8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf4x8(uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg8ei32_v_u8mf4x8(uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf2x8(uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg8ei32_v_u8mf2x8(uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8m1x8(uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg8ei32_v_u8m1x8(uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u16mf4x8(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg8ei32_v_u16mf4x8(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u16mf2x8(uint16_t 
*base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg8ei32_v_u16mf2x8(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u16m1x8(uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg8ei32_v_u16m1x8(uint16_t *base, vuint32m2_t bindex, vuint16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u32mf2x8(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg8ei32_v_u32mf2x8(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u32m1x8(uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg8ei32_v_u32m1x8(uint32_t *base, vuint32m1_t bindex, vuint32m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u64m1x8(uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void 
test_vsuxseg8ei32_v_u64m1x8(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg8ei32_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg8ei32_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg8ei32_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f32mf2x8_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg8ei32_v_f32mf2x8_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f32m1x8_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg8ei32_v_f32m1x8_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f64m1x8_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg8ei32_v_f64m1x8_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg8ei32_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg8ei32_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg8ei32_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg8ei32_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg8ei32_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg8ei32_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg8ei32_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg8ei32_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg8ei32_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg8ei32_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg8ei32_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg8ei32_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg8ei32_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, 
vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg8ei32_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg8ei32_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg8ei32_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg8ei32_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg8ei32_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg8ei32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg8ei32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei64.c index 9c1dfeaa0601f2..a8994a0ba39ec8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei64.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f16mf4x8(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg8ei64_v_f16mf4x8(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f16mf2x8(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg8ei64_v_f16mf2x8(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f16m1x8(_Float16 *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg8ei64_v_f16m1x8(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f32mf2x8(float *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg8ei64_v_f32mf2x8(float *base, vuint64m1_t bindex, 
vfloat32mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f32m1x8(float *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg8ei64_v_f32m1x8(float *base, vuint64m2_t bindex, vfloat32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f64m1x8(double *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg8ei64_v_f64m1x8(double *base, vuint64m1_t bindex, vfloat64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf8x8(int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg8ei64_v_i8mf8x8(int8_t *base, vuint64m1_t bindex, vint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf4x8(int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg8ei64_v_i8mf4x8(int8_t *base, vuint64m2_t bindex, vint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf2x8(int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg8ei64_v_i8mf2x8(int8_t *base, vuint64m4_t bindex, vint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8m1x8(int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg8ei64_v_i8m1x8(int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16mf4x8(int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg8ei64_v_i16mf4x8(int16_t *base, vuint64m1_t bindex, vint16mf4x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16mf2x8(int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg8ei64_v_i16mf2x8(int16_t *base, vuint64m2_t bindex, vint16mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16m1x8(int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg8ei64_v_i16m1x8(int16_t *base, vuint64m4_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i32mf2x8(int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg8ei64_v_i32mf2x8(int32_t *base, vuint64m1_t bindex, vint32mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i32m1x8(int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg8ei64_v_i32m1x8(int32_t *base, vuint64m2_t bindex, vint32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i64m1x8(int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg8ei64_v_i64m1x8(int64_t *base, vuint64m1_t bindex, vint64m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8mf8x8(uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg8ei64_v_u8mf8x8(uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8mf4x8(uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg8ei64_v_u8mf4x8(uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8mf2x8(uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg8ei64_v_u8mf2x8(uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8m1x8(uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg8ei64_v_u8m1x8(uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16mf4x8(uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg8ei64_v_u16mf4x8(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16mf2x8(uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg8ei64_v_u16mf2x8(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16m1x8(uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg8ei64_v_u16m1x8(uint16_t *base, vuint64m4_t bindex, vuint16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u32mf2x8(uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg8ei64_v_u32mf2x8(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u32m1x8(uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg8ei64_v_u32m1x8(uint32_t *base, vuint64m2_t bindex, vuint32m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u64m1x8(uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg8ei64_v_u64m1x8(uint64_t *base, vuint64m1_t bindex, vuint64m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg8ei64_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg8ei64_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg8ei64_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f32mf2x8_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg8ei64_v_f32mf2x8_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f32m1x8_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg8ei64_v_f32m1x8_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f64m1x8_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg8ei64_v_f64m1x8_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg8ei64_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg8ei64_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg8ei64_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg8ei64_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: 
define dso_local void @test_vsuxseg8ei64_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg8ei64_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg8ei64_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg8ei64_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg8ei64_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg8ei64_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg8ei64_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg8ei64_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg8ei64_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg8ei64_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg8ei64_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg8ei64_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg8ei64_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg8ei64_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg8ei64_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg8ei64_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg8ei64_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u64m1x8_m // CHECK-RV64-SAME: 
( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei8.c index e4ea69a3691e00..0d3995dbb4b949 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f16mf4x8(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg8ei8_v_f16mf4x8(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f16mf2x8(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg8ei8_v_f16mf2x8(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: 
ret void // void test_vsuxseg8ei8_v_f16m1x8(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg8ei8_v_f16m1x8(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f32mf2x8(float *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg8ei8_v_f32mf2x8(float *base, vuint8mf8_t bindex, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f32m1x8(float *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg8ei8_v_f32m1x8(float *base, vuint8mf4_t bindex, vfloat32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f64m1x8(double *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg8ei8_v_f64m1x8(double *base, vuint8mf8_t bindex, vfloat64m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8mf8x8(int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void 
test_vsuxseg8ei8_v_i8mf8x8(int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8mf4x8(int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg8ei8_v_i8mf4x8(int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8mf2x8(int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg8ei8_v_i8mf2x8(int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8m1x8(int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg8ei8_v_i8m1x8(int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16mf4x8(int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg8ei8_v_i16mf4x8(int16_t *base, vuint8mf8_t bindex, vint16mf4x8_ // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg8ei8_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16mf2x8(int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg8ei8_v_i16mf2x8(int16_t *base, vuint8mf4_t bindex, vint16mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16m1x8(int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg8ei8_v_i16m1x8(int16_t *base, vuint8mf2_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i32mf2x8(int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg8ei8_v_i32mf2x8(int32_t *base, vuint8mf8_t bindex, vint32mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i32m1x8(int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg8ei8_v_i32m1x8(int32_t *base, vuint8mf4_t bindex, vint32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) 
[[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i64m1x8(int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg8ei8_v_i64m1x8(int64_t *base, vuint8mf8_t bindex, vint64m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf8x8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg8ei8_v_u8mf8x8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf4x8(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg8ei8_v_u8mf4x8(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf2x8(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg8ei8_v_u8mf2x8(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8m1x8(uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg8ei8_v_u8m1x8(uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16mf4x8(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg8ei8_v_u16mf4x8(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16mf2x8(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg8ei8_v_u16mf2x8(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16m1x8(uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg8ei8_v_u16m1x8(uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u32mf2x8(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg8ei8_v_u32mf2x8(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u32m1x8(uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg8ei8_v_u32m1x8(uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u64m1x8(uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg8ei8_v_u64m1x8(uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg8ei8_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg8ei8_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg8ei8_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f32mf2x8_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg8ei8_v_f32mf2x8_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f32m1x8_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg8ei8_v_f32m1x8_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f64m1x8_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg8ei8_v_f64m1x8_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg8ei8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg8ei8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg8ei8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg8ei8_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg8ei8_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg8ei8_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg8ei8_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, 
vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg8ei8_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg8ei8_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg8ei8_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg8ei8_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg8ei8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg8ei8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg8ei8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg8ei8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg8ei8_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg8ei8_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg8ei8_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg8ei8_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg8ei8_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg2ei16.c index cd0281a4f9367d..7c8a70e83104f1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg2ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf4x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -21,7 +21,7 @@ vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf2x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -31,7 +31,7 @@ vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m1x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -41,7 +41,7 @@ vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m2x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -51,7 +51,7 @@ vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m4x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, const __bf16 *rs1, vuint16m4_t rs2, size_t vl) { @@ -61,7 +61,7 @@ vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf4x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tum(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -71,7 +71,7 @@ vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tum(vbool64_t vm, vbfloat16mf4x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf2x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tum(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -81,7 +81,7 @@ vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tum(vbool32_t vm, vbfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m1x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tum(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -91,7 +91,7 @@ vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tum(vbool16_t vm, vbfloat16m1x2_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m2x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -101,7 +101,7 @@ vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m4x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd, const __bf16 *rs1, vuint16m4_t rs2, size_t vl) { @@ -111,7 +111,7 @@ vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf4x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tumu(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -121,7 +121,7 @@ vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tumu(vbool64_t vm, vbfloat16mf4x2 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf2x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tumu(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -131,7 +131,7 @@ vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tumu(vbool32_t vm, vbfloat16mf2x2 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m1x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tumu(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -141,7 +141,7 @@ vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tumu(vbool16_t vm, vbfloat16m1x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m2x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tumu(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -151,7 +151,7 @@ vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tumu(vbool8_t vm, vbfloat16m2x2_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m4x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t 
test_vloxseg2ei16_v_bf16m4x2_tumu(vbool4_t vm, vbfloat16m4x2_t vd, const __bf16 *rs1, vuint16m4_t rs2, size_t vl) { @@ -161,7 +161,7 @@ vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tumu(vbool4_t vm, vbfloat16m4x2_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf4x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_mu(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -171,7 +171,7 @@ vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_mu(vbool64_t vm, vbfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf2x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_mu(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -181,7 +181,7 @@ vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_mu(vbool32_t vm, vbfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m1x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -191,7 +191,7 @@ vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei16_v_bf16m2x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -201,7 +201,7 @@ vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m4x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, const __bf16 *rs1, vuint16m4_t rs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg3ei16.c index b93d1a983159c9..708a3dfe94fa23 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg3ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf4x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -21,7 +21,7 @@ vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, const __b // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf2x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -31,7 +31,7 @@ vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m1x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -41,7 +41,7 @@ vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m2x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -51,7 +51,7 @@ vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf4x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tum(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -61,7 +61,7 @@ vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tum(vbool64_t vm, vbfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf2x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tum(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -71,7 +71,7 @@ vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tum(vbool32_t vm, vbfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m1x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tum(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -81,7 +81,7 @@ vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tum(vbool16_t vm, vbfloat16m1x3_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m2x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // 
vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -91,7 +91,7 @@ vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf4x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tumu(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -101,7 +101,7 @@ vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tumu(vbool64_t vm, vbfloat16mf4x3 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf2x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tumu(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -111,7 +111,7 @@ vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tumu(vbool32_t vm, vbfloat16mf2x3 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m1x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tumu(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -121,7 +121,7 @@ vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tumu(vbool16_t vm, vbfloat16m1x3_t // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m2x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tumu(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -131,7 +131,7 @@ vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tumu(vbool8_t vm, vbfloat16m2x3_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf4x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_mu(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -141,7 +141,7 @@ vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_mu(vbool64_t vm, vbfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf2x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_mu(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -151,7 +151,7 @@ vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_mu(vbool32_t vm, vbfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m1x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -161,7 +161,7 @@ vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m2x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg4ei16.c index 6cdd4621644c3d..af80f6a8497f70 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg4ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf4x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -21,7 +21,7 @@ vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf2x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -31,7 +31,7 @@ vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m1x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -41,7 +41,7 @@ vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m2x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -51,7 +51,7 @@ vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf4x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", 
, 4) [[TMP0]] // vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tum(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -61,7 +61,7 @@ vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tum(vbool64_t vm, vbfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf2x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tum(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -71,7 +71,7 @@ vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tum(vbool32_t vm, vbfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m1x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tum(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -81,7 +81,7 @@ vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tum(vbool16_t vm, vbfloat16m1x4_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m2x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -91,7 +91,7 @@ vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf4x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tumu(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -101,7 +101,7 @@ vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tumu(vbool64_t vm, vbfloat16mf4x4 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf2x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tumu(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -111,7 +111,7 @@ vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tumu(vbool32_t vm, vbfloat16mf2x4 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m1x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tumu(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -121,7 +121,7 @@ vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tumu(vbool16_t vm, vbfloat16m1x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m2x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tumu(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -131,7 +131,7 @@ vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tumu(vbool8_t vm, vbfloat16m2x4_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf4x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_mu(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -141,7 +141,7 @@ vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_mu(vbool64_t vm, vbfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf2x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_mu(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -151,7 +151,7 @@ vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_mu(vbool32_t vm, vbfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m1x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -161,7 +161,7 @@ vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m2x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg5ei16.c index 527ff1d53054c6..c373937a7a4c00 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg5ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf4x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -21,7 +21,7 @@ vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf2x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -31,7 +31,7 @@ vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16m1x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -41,7 +41,7 @@ vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf4x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tum(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -51,7 +51,7 @@ vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tum(vbool64_t vm, vbfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf2x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tum(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ 
-61,7 +61,7 @@ vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tum(vbool32_t vm, vbfloat16mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16m1x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tum(vbool16_t vm, vbfloat16m1x5_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -71,7 +71,7 @@ vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tum(vbool16_t vm, vbfloat16m1x5_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf4x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tumu(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -81,7 +81,7 @@ vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tumu(vbool64_t vm, vbfloat16mf4x5 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf2x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tumu(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -91,7 +91,7 @@ vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tumu(vbool32_t vm, vbfloat16mf2x5 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16m1x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr 
noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tumu(vbool16_t vm, vbfloat16m1x5_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -101,7 +101,7 @@ vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tumu(vbool16_t vm, vbfloat16m1x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf4x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_mu(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -111,7 +111,7 @@ vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_mu(vbool64_t vm, vbfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf2x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_mu(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -121,7 +121,7 @@ vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_mu(vbool32_t vm, vbfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16m1x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], 
ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_mu(vbool16_t vm, vbfloat16m1x5_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg6ei16.c index b762fc064f0bcf..0bd15e5c202d63 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg6ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf4x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -21,7 +21,7 @@ vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf2x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -31,7 +31,7 @@ vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16m1x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -41,7 +41,7 @@ vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf4x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tum(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -51,7 +51,7 @@ vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tum(vbool64_t vm, vbfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf2x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tum(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -61,7 +61,7 @@ vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tum(vbool32_t vm, vbfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16m1x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t 
test_vloxseg6ei16_v_bf16m1x6_tum(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -71,7 +71,7 @@ vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tum(vbool16_t vm, vbfloat16m1x6_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf4x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tumu(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -81,7 +81,7 @@ vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tumu(vbool64_t vm, vbfloat16mf4x6 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf2x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tumu(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -91,7 +91,7 @@ vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tumu(vbool32_t vm, vbfloat16mf2x6 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16m1x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tumu(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -101,7 +101,7 @@ vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tumu(vbool16_t vm, vbfloat16m1x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
6) @test_vloxseg6ei16_v_bf16mf4x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_mu(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -111,7 +111,7 @@ vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_mu(vbool64_t vm, vbfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf2x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_mu(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -121,7 +121,7 @@ vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_mu(vbool32_t vm, vbfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16m1x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_mu(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg7ei16.c index 9bf2ca156f7b95..81547609fff383 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg7ei16.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg7ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf4x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -21,7 +21,7 @@ vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf2x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -31,7 +31,7 @@ vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16m1x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -41,7 +41,7 @@ vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf4x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tum(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -51,7 +51,7 @@ vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tum(vbool64_t vm, vbfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf2x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tum(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -61,7 +61,7 @@ vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tum(vbool32_t vm, vbfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16m1x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tum(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -71,7 +71,7 @@ vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tum(vbool16_t vm, vbfloat16m1x7_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf4x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tumu(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -81,7 +81,7 @@ vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tumu(vbool64_t vm, vbfloat16mf4x7 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf2x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tumu(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -91,7 +91,7 @@ vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tumu(vbool32_t vm, vbfloat16mf2x7 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16m1x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tumu(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -101,7 +101,7 @@ vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tumu(vbool16_t vm, vbfloat16m1x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf4x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t 
test_vloxseg7ei16_v_bf16mf4x7_mu(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -111,7 +111,7 @@ vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_mu(vbool64_t vm, vbfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf2x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_mu(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -121,7 +121,7 @@ vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_mu(vbool32_t vm, vbfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16m1x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_mu(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg8ei16.c index 2b2f910b29694f..e7b133f0a52fcb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg8ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf4x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -21,7 +21,7 @@ vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf2x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -31,7 +31,7 @@ vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16m1x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -41,7 +41,7 @@ vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf4x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tum(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -51,7 +51,7 @@ vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tum(vbool64_t vm, vbfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf2x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr 
noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tum(vbool32_t vm, vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -61,7 +61,7 @@ vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tum(vbool32_t vm, vbfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16m1x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tum(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -71,7 +71,7 @@ vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tum(vbool16_t vm, vbfloat16m1x8_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf4x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tumu(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -81,7 +81,7 @@ vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tumu(vbool64_t vm, vbfloat16mf4x8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf2x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], 
ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tumu(vbool32_t vm, vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -91,7 +91,7 @@ vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tumu(vbool32_t vm, vbfloat16mf2x8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16m1x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tumu(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -101,7 +101,7 @@ vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tumu(vbool16_t vm, vbfloat16m1x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf4x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_mu(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -111,7 +111,7 @@ vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_mu(vbool64_t vm, vbfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf2x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], 
i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_mu(vbool32_t vm, vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -121,7 +121,7 @@ vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_mu(vbool32_t vm, vbfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16m1x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_mu(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg2e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg2e16ff.c index 7e061c6ca7f463..e2d23127f2762c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg2e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg2e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf4x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -24,7 +24,7 @@ vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, const __bf // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf2x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) 
[[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -37,7 +37,7 @@ vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, const __bf // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m1x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -50,7 +50,7 @@ vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_tu(vbfloat16m1x2_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m2x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -63,7 +63,7 @@ vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_tu(vbfloat16m2x2_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m4x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], 
align 8 @@ -76,7 +76,7 @@ vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_tu(vbfloat16m4x2_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf4x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -89,7 +89,7 @@ vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_tum(vbool64_t vm, vbfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf2x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -102,7 +102,7 @@ vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_tum(vbool32_t vm, vbfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m1x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -115,7 +115,7 @@ vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_tum(vbool16_t vm, 
vbfloat16m1x2_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m2x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -128,7 +128,7 @@ vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m4x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -141,7 +141,7 @@ vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf4x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -154,7 +154,7 @@ vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_tumu(vbool64_t vm, vbfloat16mf4x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vlseg2e16ff_v_bf16mf2x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -167,7 +167,7 @@ vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_tumu(vbool32_t vm, vbfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m1x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -180,7 +180,7 @@ vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_tumu(vbool16_t vm, vbfloat16m1x2_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m2x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -193,7 +193,7 @@ vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_tumu(vbool8_t vm, vbfloat16m2x2_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m4x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], 
target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -206,7 +206,7 @@ vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_tumu(vbool4_t vm, vbfloat16m4x2_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf4x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -219,7 +219,7 @@ vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_mu(vbool64_t vm, vbfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf2x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -232,7 +232,7 @@ vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_mu(vbool32_t vm, vbfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m1x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -245,7 +245,7 @@ vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m2x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -258,7 +258,7 @@ vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m4x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg3e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg3e16ff.c index 8dcc5a84cfd0ed..837a524dfff615 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg3e16ff.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg3e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf4x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -24,7 +24,7 @@ vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, const __bf // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf2x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -37,7 +37,7 @@ vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, const __bf // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m1x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -50,7 +50,7 @@ vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_tu(vbfloat16m1x3_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m2x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr 
noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -63,7 +63,7 @@ vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_tu(vbfloat16m2x3_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf4x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -76,7 +76,7 @@ vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_tum(vbool64_t vm, vbfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf2x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -89,7 +89,7 @@ vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_tum(vbool32_t vm, vbfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m1x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -102,7 +102,7 @@ vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_tum(vbool16_t vm, vbfloat16m1x3_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m2x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -115,7 +115,7 @@ vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf4x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -128,7 +128,7 @@ vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_tumu(vbool64_t vm, vbfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf2x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -141,7 +141,7 @@ vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_tumu(vbool32_t vm, vbfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m1x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -154,7 +154,7 @@ vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_tumu(vbool16_t vm, vbfloat16m1x3_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m2x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -167,7 +167,7 @@ vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_tumu(vbool8_t vm, vbfloat16m2x3_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf4x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -180,7 +180,7 @@ vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_mu(vbool64_t vm, vbfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf2x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -193,7 +193,7 @@ vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_mu(vbool32_t vm, vbfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m1x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -206,7 +206,7 @@ vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m2x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) 
[[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg4e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg4e16ff.c
index 6f916412ebcd14..950b32dba1931b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg4e16ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg4e16ff.c
@@ -11,7 +11,7 @@
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf4x4_tu(
 // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -24,7 +24,7 @@ vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, const __bf
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf2x4_tu(
 // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -37,7 +37,7 @@ vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, const __bf
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m1x4_tu(
 // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
{ target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -50,7 +50,7 @@ vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_tu(vbfloat16m1x4_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m2x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -63,7 +63,7 @@ vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_tu(vbfloat16m2x4_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf4x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -76,7 +76,7 @@ vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_tum(vbool64_t vm, vbfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf2x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -89,7 +89,7 @@ vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_tum(vbool32_t vm, vbfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m1x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -102,7 +102,7 @@ vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_tum(vbool16_t vm, vbfloat16m1x4_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m2x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -115,7 +115,7 @@ vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf4x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -128,7 +128,7 @@ vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_tumu(vbool64_t vm, vbfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf2x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -141,7 +141,7 @@ vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_tumu(vbool32_t vm, vbfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m1x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -154,7 +154,7 @@ vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_tumu(vbool16_t vm, vbfloat16m1x4_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m2x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -167,7 +167,7 @@ vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_tumu(vbool8_t vm, vbfloat16m2x4_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf4x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -180,7 +180,7 @@ vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_mu(vbool64_t vm, vbfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf2x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -193,7 +193,7 @@ vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_mu(vbool32_t vm, vbfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m1x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) 
[[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -206,7 +206,7 @@ vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd,
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m2x4_mu(
 // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg5e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg5e16ff.c
index 3937a1b1dd1369..aacccc54f9db40 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg5e16ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg5e16ff.c
@@ -11,7 +11,7 @@
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf4x5_tu(
 // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -24,7 +24,7 @@ vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, const __bf
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf2x5_tu(
 // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], i64 [[VL]], i64
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -37,7 +37,7 @@ vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, const __bf // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16m1x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -50,7 +50,7 @@ vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_tu(vbfloat16m1x5_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf4x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -63,7 +63,7 @@ vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_tum(vbool64_t vm, vbfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf2x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[VD]], 
ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -76,7 +76,7 @@ vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_tum(vbool32_t vm, vbfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16m1x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -89,7 +89,7 @@ vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_tum(vbool16_t vm, vbfloat16m1x5_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf4x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -102,7 +102,7 @@ vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_tumu(vbool64_t vm, vbfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf2x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -115,7 +115,7 @@ vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_tumu(vbool32_t vm, vbfloat16mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16m1x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -128,7 +128,7 @@ vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_tumu(vbool16_t vm, vbfloat16m1x5_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf4x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -141,7 +141,7 @@ vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_mu(vbool64_t vm, vbfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf2x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -154,7 +154,7 @@ vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_mu(vbool32_t vm, vbfloat16mf2x5_t
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16m1x5_mu(
 // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg6e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg6e16ff.c
index e5aa575aca3b86..a8f113a109d811 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg6e16ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg6e16ff.c
@@ -11,7 +11,7 @@
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf4x6_tu(
 // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -24,7 +24,7 @@ vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, const __bf
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf2x6_tu(
 // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]],
i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -37,7 +37,7 @@ vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, const __bf // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16m1x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -50,7 +50,7 @@ vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_tu(vbfloat16m1x6_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf4x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -63,7 +63,7 @@ vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_tum(vbool64_t vm, vbfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf2x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue 
{ target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -76,7 +76,7 @@ vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_tum(vbool32_t vm, vbfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16m1x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -89,7 +89,7 @@ vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_tum(vbool16_t vm, vbfloat16m1x6_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf4x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -102,7 +102,7 @@ vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_tumu(vbool64_t vm, vbfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf2x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -115,7 +115,7 @@ vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_tumu(vbool32_t vm, vbfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16m1x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -128,7 +128,7 @@ vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_tumu(vbool16_t vm, vbfloat16m1x6_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf4x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -141,7 +141,7 @@ vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_mu(vbool64_t vm, vbfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf2x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -154,7 +154,7 @@ vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_mu(vbool32_t 
vm, vbfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16m1x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg7e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg7e16ff.c index 9948deaa59d1bd..9aac3180827a7f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg7e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg7e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf4x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -24,7 +24,7 @@ vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, const __bf // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf2x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 
} [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -37,7 +37,7 @@ vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, const __bf // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16m1x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -50,7 +50,7 @@ vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_tu(vbfloat16m1x7_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf4x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -63,7 +63,7 @@ vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_tum(vbool64_t vm, vbfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf2x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -76,7 +76,7 @@ vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_tum(vbool32_t vm, 
vbfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16m1x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -89,7 +89,7 @@ vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_tum(vbool16_t vm, vbfloat16m1x7_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf4x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -102,7 +102,7 @@ vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_tumu(vbool64_t vm, vbfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf2x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -115,7 +115,7 @@ vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_tumu(vbool32_t vm, vbfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vlseg7e16ff_v_bf16m1x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -128,7 +128,7 @@ vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_tumu(vbool16_t vm, vbfloat16m1x7_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf4x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -141,7 +141,7 @@ vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_mu(vbool64_t vm, vbfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf2x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -154,7 +154,7 @@ vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_mu(vbool32_t vm, vbfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16m1x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) 
[[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg8e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg8e16ff.c index 18c0611ae8ed1b..2aec5af87f37a1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg8e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg8e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf4x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -24,7 +24,7 @@ vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, const __bf // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf2x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -37,7 +37,7 @@ vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, const __bf // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16m1x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -50,7 +50,7 @@ vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_tu(vbfloat16m1x8_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf4x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -63,7 +63,7 @@ vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_tum(vbool64_t vm, vbfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf2x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -76,7 +76,7 @@ vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_tum(vbool32_t vm, vbfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16m1x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) 
[[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -89,7 +89,7 @@ vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_tum(vbool16_t vm, vbfloat16m1x8_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf4x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -102,7 +102,7 @@ vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_tumu(vbool64_t vm, vbfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf2x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -115,7 +115,7 @@ vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_tumu(vbool32_t vm, vbfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16m1x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -128,7 +128,7 @@ vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_tumu(vbool16_t vm, vbfloat16m1x8_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf4x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -141,7 +141,7 @@ vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_mu(vbool64_t vm, vbfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf2x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -154,7 +154,7 @@ vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_mu(vbool32_t vm, vbfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16m1x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg2ei16.c index 1e2de39ee6d1af..a1acdb3f15e7a8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg2ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf4x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -21,7 +21,7 @@ vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf2x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -31,7 +31,7 @@ vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m1x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -41,7 +41,7 @@ vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m2x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -51,7 +51,7 @@ vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m4x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, const __bf16 *rs1, vuint16m4_t rs2, size_t vl) { @@ -61,7 +61,7 @@ vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf4x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // 
vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tum(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -71,7 +71,7 @@ vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tum(vbool64_t vm, vbfloat16mf4x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf2x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tum(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -81,7 +81,7 @@ vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tum(vbool32_t vm, vbfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m1x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tum(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -91,7 +91,7 @@ vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tum(vbool16_t vm, vbfloat16m1x2_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m2x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -101,7 +101,7 @@ vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m4x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd, const __bf16 *rs1, vuint16m4_t rs2, size_t vl) { @@ -111,7 +111,7 @@ vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf4x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tumu(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -121,7 +121,7 @@ vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tumu(vbool64_t vm, vbfloat16mf4x2 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf2x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tumu(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -131,7 +131,7 @@ vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tumu(vbool32_t vm, vbfloat16mf2x2 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m1x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tumu(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -141,7 +141,7 @@ vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tumu(vbool16_t vm, vbfloat16m1x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m2x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tumu(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -151,7 +151,7 @@ vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tumu(vbool8_t vm, vbfloat16m2x2_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m4x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tumu(vbool4_t vm, vbfloat16m4x2_t vd, const __bf16 *rs1, vuint16m4_t rs2, size_t vl) { @@ -161,7 +161,7 @@ vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tumu(vbool4_t vm, vbfloat16m4x2_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf4x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_mu(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -171,7 +171,7 @@ vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_mu(vbool64_t vm, vbfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf2x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_mu(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -181,7 +181,7 @@ vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_mu(vbool32_t vm, vbfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m1x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -191,7 +191,7 @@ vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m2x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // 
vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -201,7 +201,7 @@ vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m4x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, const __bf16 *rs1, vuint16m4_t rs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg3ei16.c index 2d93869ab46056..cb3ac512fb35bb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg3ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf4x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -21,7 +21,7 @@ vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf2x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t 
test_vluxseg3ei16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -31,7 +31,7 @@ vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m1x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -41,7 +41,7 @@ vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m2x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -51,7 +51,7 @@ vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf4x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tum(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -61,7 +61,7 @@ vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tum(vbool64_t vm, vbfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf2x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tum(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -71,7 +71,7 @@ vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tum(vbool32_t vm, vbfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m1x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tum(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -81,7 +81,7 @@ vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tum(vbool16_t vm, vbfloat16m1x3_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m2x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -91,7 +91,7 @@ vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf4x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tumu(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -101,7 +101,7 @@ vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tumu(vbool64_t vm, vbfloat16mf4x3 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf2x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tumu(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -111,7 +111,7 @@ vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tumu(vbool32_t vm, vbfloat16mf2x3 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m1x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tumu(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -121,7 +121,7 @@ vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tumu(vbool16_t vm, vbfloat16m1x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m2x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tumu(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -131,7 +131,7 @@ vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tumu(vbool8_t vm, vbfloat16m2x3_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf4x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_mu(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -141,7 +141,7 @@ vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_mu(vbool64_t vm, vbfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf2x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_mu(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -151,7 +151,7 @@ vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_mu(vbool32_t vm, vbfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m1x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -161,7 +161,7 @@ vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m2x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg4ei16.c index da0bfe95f13630..275cedf059963e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg4ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf4x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -21,7 +21,7 @@ vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf2x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -31,7 +31,7 @@ vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, const __b // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m1x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -41,7 +41,7 @@ vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m2x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -51,7 +51,7 @@ vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf4x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tum(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -61,7 +61,7 @@ vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tum(vbool64_t vm, vbfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf2x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], 
[[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tum(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -71,7 +71,7 @@ vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tum(vbool32_t vm, vbfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m1x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tum(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -81,7 +81,7 @@ vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tum(vbool16_t vm, vbfloat16m1x4_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m2x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -91,7 +91,7 @@ vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf4x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tumu(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -101,7 +101,7 @@ vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tumu(vbool64_t vm, vbfloat16mf4x4 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf2x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tumu(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -111,7 +111,7 @@ vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tumu(vbool32_t vm, vbfloat16mf2x4 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m1x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tumu(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -121,7 +121,7 @@ vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tumu(vbool16_t vm, vbfloat16m1x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m2x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tumu(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -131,7 +131,7 @@ vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tumu(vbool8_t 
vm, vbfloat16m2x4_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf4x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_mu(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -141,7 +141,7 @@ vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_mu(vbool64_t vm, vbfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf2x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_mu(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -151,7 +151,7 @@ vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_mu(vbool32_t vm, vbfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m1x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -161,7 +161,7 @@ vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m2x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg5ei16.c index e8e40a6c5c7841..fd99913b63b4ed 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg5ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf4x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -21,7 +21,7 @@ vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf2x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -31,7 +31,7 @@ vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16m1x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -41,7 +41,7 @@ vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf4x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tum(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -51,7 +51,7 @@ vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tum(vbool64_t vm, vbfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf2x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tum(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -61,7 +61,7 @@ vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tum(vbool32_t vm, vbfloat16mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16m1x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tum(vbool16_t vm, vbfloat16m1x5_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -71,7 +71,7 @@ vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tum(vbool16_t vm, vbfloat16m1x5_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf4x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tumu(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -81,7 +81,7 @@ vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tumu(vbool64_t vm, vbfloat16mf4x5 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf2x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tumu(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -91,7 +91,7 @@ vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tumu(vbool32_t vm, vbfloat16mf2x5 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16m1x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t 
test_vluxseg5ei16_v_bf16m1x5_tumu(vbool16_t vm, vbfloat16m1x5_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -101,7 +101,7 @@ vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tumu(vbool16_t vm, vbfloat16m1x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf4x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_mu(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -111,7 +111,7 @@ vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_mu(vbool64_t vm, vbfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf2x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_mu(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -121,7 +121,7 @@ vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_mu(vbool32_t vm, vbfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16m1x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_mu(vbool16_t vm, vbfloat16m1x5_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg6ei16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg6ei16.c index 120a836c0155ce..af9b21fa70e8b2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg6ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf4x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -21,7 +21,7 @@ vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf2x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -31,7 +31,7 @@ vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16m1x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -41,7 +41,7 @@ vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf4x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], 
target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tum(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -51,7 +51,7 @@ vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tum(vbool64_t vm, vbfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf2x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tum(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -61,7 +61,7 @@ vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tum(vbool32_t vm, vbfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16m1x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tum(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -71,7 +71,7 @@ vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tum(vbool16_t vm, vbfloat16m1x6_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf4x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tumu(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -81,7 +81,7 @@ vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tumu(vbool64_t vm, vbfloat16mf4x6 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf2x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tumu(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -91,7 +91,7 @@ vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tumu(vbool32_t vm, vbfloat16mf2x6 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16m1x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tumu(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -101,7 +101,7 @@ vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tumu(vbool16_t vm, vbfloat16m1x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf4x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_mu(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -111,7 +111,7 @@ vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_mu(vbool64_t vm, vbfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf2x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_mu(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -121,7 +121,7 @@ vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_mu(vbool32_t vm, vbfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16m1x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_mu(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg7ei16.c index 52ea20e98539d4..e9916204e6f115 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg7ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf4x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -21,7 +21,7 @@ vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf2x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -31,7 +31,7 @@ vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16m1x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -41,7 +41,7 @@ vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf4x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tum(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -51,7 +51,7 @@ vbfloat16mf4x7_t 
test_vluxseg7ei16_v_bf16mf4x7_tum(vbool64_t vm, vbfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf2x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tum(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -61,7 +61,7 @@ vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tum(vbool32_t vm, vbfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16m1x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tum(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -71,7 +71,7 @@ vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tum(vbool16_t vm, vbfloat16m1x7_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf4x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tumu(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -81,7 +81,7 @@ vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tumu(vbool64_t vm, vbfloat16mf4x7 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf2x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tumu(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -91,7 +91,7 @@ vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tumu(vbool32_t vm, vbfloat16mf2x7 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16m1x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tumu(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -101,7 +101,7 @@ vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tumu(vbool16_t vm, vbfloat16m1x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf4x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_mu(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -111,7 +111,7 @@ vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_mu(vbool64_t vm, vbfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf2x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 
[[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_mu(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -121,7 +121,7 @@ vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_mu(vbool32_t vm, vbfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16m1x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_mu(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg8ei16.c index 0e16781cef05da..c3d2c83ddcdca1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg8ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf4x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -21,7 +21,7 @@ vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf2x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -31,7 +31,7 @@ vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16m1x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -41,7 +41,7 @@ vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf4x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tum(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -51,7 +51,7 @@ vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tum(vbool64_t vm, vbfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf2x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tum(vbool32_t vm, vbfloat16mf2x8_t vd, const 
__bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -61,7 +61,7 @@ vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tum(vbool32_t vm, vbfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16m1x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tum(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -71,7 +71,7 @@ vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tum(vbool16_t vm, vbfloat16m1x8_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf4x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tumu(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -81,7 +81,7 @@ vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tumu(vbool64_t vm, vbfloat16mf4x8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf2x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tumu(vbool32_t vm, vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -91,7 +91,7 @@ vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tumu(vbool32_t vm, vbfloat16mf2x8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16m1x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], 
target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tumu(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -101,7 +101,7 @@ vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tumu(vbool16_t vm, vbfloat16m1x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf4x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_mu(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -111,7 +111,7 @@ vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_mu(vbool64_t vm, vbfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf2x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_mu(vbool32_t vm, vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -121,7 +121,7 @@ vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_mu(vbool32_t vm, vbfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16m1x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_mu(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei16.c index f7d024c6cb3c58..6680cbd1261679 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, 
size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, 
vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tu(vint64m2x2_t 
maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t 
test_vloxseg2ei16_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m1x2_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1650,7 +1650,7 @@ vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1660,7 +1660,7 @@ vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1670,7 +1670,7 @@ vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1680,7 +1680,7 @@ vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1690,7 +1690,7 @@ vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1700,7 +1700,7 @@ vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1710,7 +1710,7 @@ vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1720,7 +1720,7 @@ vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1730,7 +1730,7 @@ vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1740,7 +1740,7 @@ vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -1750,7 +1750,7 @@ vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1760,7 +1760,7 @@ vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1770,7 +1770,7 @@ vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1780,7 +1780,7 @@ vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1790,7 +1790,7 @@ vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1800,7 +1800,7 @@ vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1810,7 +1810,7 @@ vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1820,7 +1820,7 @@ vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1830,7 +1830,7 @@ vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1840,7 +1840,7 @@ vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1850,7 +1850,7 @@ vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1860,7 +1860,7 @@ vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1870,7 +1870,7 @@ vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1880,7 +1880,7 @@ vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1890,7 +1890,7 @@ vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1900,7 +1900,7 @@ vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1910,7 +1910,7 @@ vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1920,7 +1920,7 @@ vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei32.c index b2798c3fb3531a..1d1ac40541961b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tu(vfloat32m1x2_t 
maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ 
-180,7 +180,7 @@ vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t 
vl) { @@ -280,7 +280,7 @@ vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, 
const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t 
test_vloxseg2ei32_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t 
maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ 
-570,7 +570,7 @@ vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tum(vbool64_t 
mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
2) @test_vloxseg2ei32_v_i8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1650,7 +1650,7 @@ vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1660,7 +1660,7 @@ vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1670,7 +1670,7 @@ vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -1680,7 +1680,7 @@ vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1690,7 +1690,7 @@ vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1700,7 +1700,7 @@ vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1710,7 +1710,7 @@ vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1720,7 +1720,7 @@ vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1730,7 +1730,7 @@ vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1740,7 +1740,7 @@ vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1750,7 +1750,7 @@ vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1760,7 +1760,7 @@ vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1770,7 +1770,7 @@ vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1780,7 +1780,7 @@ vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1790,7 +1790,7 @@ vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1800,7 +1800,7 @@ vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1810,7 +1810,7 @@ vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1820,7 +1820,7 @@ vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1830,7 +1830,7 @@ vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1840,7 +1840,7 @@ vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei64.c index 081306c85fccc9..15708d0c8e23eb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // 
vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf8x2_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei64_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, c // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t 
bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat16m1x2_t 
test_vloxseg2ei64_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t ma // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei64_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei8.c index 42ff16a18ea34d..4c3bdc68ffc07b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf4x2_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", 
, 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei8_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tum(vbool32_t mask, 
vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, 
size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t 
test_vloxseg2ei8_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf2x2_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 
2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1650,7 +1650,7 @@ vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1660,7 +1660,7 @@ vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1670,7 +1670,7 @@ vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1680,7 +1680,7 @@ vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1690,7 +1690,7 @@ vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1700,7 +1700,7 @@ vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1710,7 +1710,7 @@ vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1720,7 +1720,7 @@ vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1730,7 +1730,7 @@ vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1740,7 +1740,7 @@ vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1750,7 +1750,7 @@ vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1760,7 +1760,7 @@ vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1770,7 +1770,7 @@ vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1780,7 +1780,7 @@ vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1790,7 +1790,7 @@ vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1800,7 +1800,7 @@ vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1810,7 +1810,7 @@ vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1820,7 +1820,7 @@ vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1830,7 +1830,7 @@ vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1840,7 +1840,7 @@ vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1850,7 +1850,7 @@ vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1860,7 +1860,7 @@ vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1870,7 +1870,7 @@ vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1880,7 +1880,7 @@ vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1890,7 +1890,7 @@ vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1900,7 +1900,7 @@ vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1910,7 +1910,7 @@ vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1920,7 +1920,7 @@ vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei16.c index 3f00ae95b7fa05..60b9524252a593 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t 
test_vloxseg3ei16_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf8x3_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // 
vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m1x3_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei16_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t 
test_vloxseg3ei16_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t 
maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ 
-460,7 +460,7 @@ vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tum(vbool32_t mask, 
vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei16_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei32.c index 34b29b3c139e10..25fb9d25f32f28 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, 
size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, 
size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const 
uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
 //
 vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -370,7 +370,7 @@ vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, cons
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m2x3_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
 //
 vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
@@ -380,7 +380,7 @@ vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, cons
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf4x3_tum
 // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
 //
 vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
@@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf2x3_tum
 // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
 //
 vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
@@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m1x3_tum
 // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
 //
 vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
@@ -410,7 +410,7 @@ vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t ma
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m2x3_tum
 // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
 //
 vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
@@ -420,7 +420,7 @@ vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mas
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32mf2x3_tum
 // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
 //
 vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) {
@@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m1x3_tum
 // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
 //
 vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) {
@@ -440,7 +440,7 @@ vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t ma
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m2x3_tum
 // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
 //
 vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) {
@@ -450,7 +450,7 @@ vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t ma
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m1x3_tum
 // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
 //
vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t 
maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ 
vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t 
masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei32_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei64.c index 72b0923a36f317..d04a27497f4dea 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, 
size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t 
vl) { @@ -200,7 +200,7 @@ vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t 
bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei8.c index 4d105b2c80e9de..c2f03cec985fd8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei8_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 
3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei16.c index 8ca3d370ee6493..f08193e25ff1d1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tu(vfloat32m2x4_t 
maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", 
, 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tu(vint16m1x4_t 
maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, 
size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, 
const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ 
-410,7 +410,7 @@ vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t 
test_vloxseg4ei16_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t ma // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei16_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei32.c index db19aa00d88aaf..543e43bc663d6c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei32_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tu(vint8mf8x4_t 
maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t 
vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t 
bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei64.c index dc250b869f77e1..5bfa5874be335d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t 
test_vloxseg4ei64_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf8x4_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // 
vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m2x4_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // 
vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m2x4_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei8.c index cce3cad35c11ce..db3d822600b3e6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei8_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 
4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei16.c index eb18c32c292b15..83591763950e2c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tu(vint8mf8x5_t 
maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, 
size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // 
vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tum(vbool64_t 
mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, 
size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tum(vbool8_t mask, 
vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf8x5_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei32.c index afd9521607a3b0..0712efc7ce2182 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t 
test_vloxseg5ei32_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf2x5_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", 
, 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vloxseg5ei32_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei64.c index 0dadc3145353ae..a8acbeb8775f29 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tu(vuint8mf4x5_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // 
vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t 
maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ 
vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t 
mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vloxseg5ei64_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei8.c index bd2534f42d8c00..2f1501a75fe646 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 
@@ vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ 
-200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t 
test_vloxseg5ei8_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t 
test_vloxseg5ei8_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf2x5_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", 
, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", 
, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t 
test_vloxseg5ei8_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei16.c index 69278176e8cf25..624004742e4562 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, 
vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const 
int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t 
test_vloxseg6ei16_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vloxseg6ei16_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei32.c index a1e8d1397bc1a5..c439d71f80a70c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t 
test_vloxseg6ei32_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf2x6_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", 
, 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vloxseg6ei32_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei64.c index 08a39a8142fc93..d0def111122722 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tu(vuint8mf4x6_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // 
vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t 
maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ 
vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t 
mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vloxseg6ei64_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei8.c index 7b58ff6c8d37cf..7268c800edaae0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 
@@ vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ 
-200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t 
test_vloxseg6ei8_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t 
test_vloxseg6ei8_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf2x6_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", 
, 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", 
, 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t 
test_vloxseg6ei8_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei16.c index 226f2e8062f85a..e04a78db3a5b0f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, 
vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const 
int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t 
test_vloxseg7ei16_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vloxseg7ei16_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei32.c index 9b0dbd68900e0b..2b43eb52b5a1aa 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t 
test_vloxseg7ei32_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf2x7_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", 
, 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vloxseg7ei32_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei64.c index 0c1a25dad8f2fd..7a5be2a59df4e0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tu(vuint8mf4x7_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // 
vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t 
maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ 
vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t 
mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vloxseg7ei64_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei8.c index 1441c80b329894..1be6adfd879228 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 
@@ vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ 
-200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t 
test_vloxseg7ei8_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t 
test_vloxseg7ei8_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf2x7_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", 
, 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", 
, 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t 
test_vloxseg7ei8_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei16.c index 0d2833b247a55e..dbc35725d5d40f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, 
vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const 
int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t 
test_vloxseg8ei16_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vloxseg8ei16_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei32.c index e4f74b79904279..da2173531ff17e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t 
test_vloxseg8ei32_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf2x8_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", 
, 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vloxseg8ei32_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei64.c index 0c23f082e8eed1..e4238cbdbabc54 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tu(vuint8mf4x8_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // 
vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t 
maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ 
vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t 
mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vloxseg8ei64_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei8.c index 23efe2b17f424f..875b8f7f9e5959 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 
@@ vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ 
-200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t 
test_vloxseg8ei8_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t 
test_vloxseg8ei8_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf2x8_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", 
, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", 
, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t 
test_vloxseg8ei8_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e16ff.c index d651f99a0d124d..4e0e031462db38 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const i // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vlseg2e16ff_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vfloat16mf4x2_t 
test_vlseg2e16ff_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -478,7 +478,7 @@ vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } 
[[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -491,7 +491,7 @@ vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -504,7 +504,7 @@ vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -517,7 +517,7 @@ vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -530,7 +530,7 @@ vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -543,7 +543,7 @@ vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -556,7 +556,7 @@ vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), 
i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -569,7 +569,7 @@ vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -582,7 +582,7 @@ vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -595,7 +595,7 @@ vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -608,7 +608,7 @@ vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -621,7 +621,7 @@ vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -634,7 +634,7 @@ vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], 
i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -647,7 +647,7 @@ vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -660,7 +660,7 @@ vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -673,7 +673,7 @@ vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 
2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -686,7 +686,7 @@ vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -699,7 +699,7 @@ vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -712,7 +712,7 @@ vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -725,7 +725,7 @@ vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -738,7 +738,7 @@ vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -751,7 +751,7 @@ vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -764,7 +764,7 @@ vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -777,7 +777,7 @@ vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e32ff.c index cf826bd71012ca..18633b89ba11a6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 
} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) 
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -478,7 +478,7 @@ vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -491,7 +491,7 @@ vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -504,7 +504,7 @@ vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -517,7 +517,7 @@ vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr 
noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -530,7 +530,7 @@ vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -543,7 +543,7 @@ vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -556,7 +556,7 @@ vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -569,7 +569,7 @@ vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -582,7 +582,7 @@ vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -595,7 +595,7 @@ vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m1x2_mu // CHECK-RV64-SAME: ( 
[[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -608,7 +608,7 @@ vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -621,7 +621,7 @@ vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e64ff.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e64ff.c index 1d2fec0fe4b9bf..7a359601a9a34a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } 
[[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const i // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m1x2_tum // CHECK-RV64-SAME: 
( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vlseg2e64ff_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vfloat64m4x2_t 
test_vlseg2e64ff_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } 
[[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } 
[[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e8ff.c index a3dad7ddaab1db..0b675c61e395c2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uin // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint8mf2x2_t 
test_vlseg2e8ff_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -478,7 +478,7 @@ vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -491,7 +491,7 @@ vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -504,7 +504,7 @@ vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -517,7 +517,7 @@ vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -530,7 +530,7 @@ vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -543,7 +543,7 @@ vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -556,7 +556,7 @@ vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -569,7 +569,7 @@ vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -582,7 +582,7 @@ vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -595,7 +595,7 @@ vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -608,7 +608,7 @@ vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -621,7 +621,7 @@ vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e16ff.c index 4cf5a2a939eaa1..1d043779747abb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], 
i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -478,7 +478,7 @@ vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -491,7 +491,7 @@ vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -504,7 +504,7 @@ vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -517,7 +517,7 @@ vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -530,7 +530,7 @@ vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -543,7 +543,7 @@ vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf2x3_mu // CHECK-RV64-SAME: ( 
[[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -556,7 +556,7 @@ vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -569,7 +569,7 @@ vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -582,7 +582,7 @@ vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vlseg3e16ff_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -595,7 +595,7 @@ vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -608,7 +608,7 @@ vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -621,7 +621,7 @@ vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t masked // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e32ff.c index 3a06d36f930352..a0d29dc43e7ab8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint32m1x3_t 
test_vlseg3e32ff_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m2x3_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m2x3_tum // CHECK-RV64-SAME: ( 
[[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vlseg3e32ff_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat32m1x3_t 
test_vlseg3e32ff_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } 
[[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } 
[[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e64ff.c index 399c35418bd793..5c9aecbba2411f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], 
i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 
3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], 
i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -309,7 +309,7 @@ vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t masked
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m2x3_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e8ff.c
index 2881994107e1f6..eba0421b8c8e76 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e8ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e8ff.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf8x3_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -23,7 +23,7 @@ vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const in
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf4x3_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]],
ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -478,7 +478,7 @@ vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -491,7 +491,7 @@ vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -504,7 +504,7 @@ vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedo
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m1x3_mu
 // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -517,7 +517,7 @@ vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_t
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m2x3_mu
 // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e16ff.c
index 7706fd521ad098..995a772e209240 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e16ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e16ff.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf4x4_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -23,7 +23,7 @@ vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple,
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf2x4_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -36,7 +36,7 @@ vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple,
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m1x4_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -49,7 +49,7 @@
vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vlseg4e16ff_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m2x4_tumu // CHECK-RV64-SAME: 
( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vlseg4e16ff_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -478,7 +478,7 @@ vuint16m2x4_t 
test_vlseg4e16ff_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -491,7 +491,7 @@ vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -504,7 +504,7 @@ vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -517,7 +517,7 @@ vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -530,7 +530,7 @@ vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -543,7 +543,7 @@ vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 
1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -556,7 +556,7 @@ vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -569,7 +569,7 @@ vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -582,7 +582,7 @@ vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -595,7 +595,7 @@ vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -608,7 +608,7 @@ vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -621,7 +621,7 @@ vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e32ff.c
index 5fb3210e81dc4f..fa3ae9ca72ddc3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e32ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e32ff.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32mf2x4_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -23,7 +23,7 @@ vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple,
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m1x4_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -36,7 +36,7 @@ vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, con
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m2x4_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 }
@llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 
5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 
1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e64ff.c
index af1b4af1f265b1..264825df7a7333 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e64ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e64ff.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m1x4_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -23,7 +23,7 @@ vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, con
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m2x4_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -36,7 +36,7 @@ vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, con
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m1x4_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m2x4_mu // CHECK-RV64-SAME: ( 
[[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e8ff.c index d77f71b48e9943..227a69d64f21b9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", 
, 4) @test_vlseg4e8ff_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vlseg4e8ff_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_mu(vbool32_t mask, 
vint8mf4x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vint8m2x4_t 
test_vlseg4e8ff_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -478,7 +478,7 @@ vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -491,7 +491,7 @@ vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], 
align 8 @@ -504,7 +504,7 @@ vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -517,7 +517,7 @@ vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e16ff.c index 9631b7efabb579..21b4303bc0458f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } 
@llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat16mf4x5_t 
test_vlseg5e16ff_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } 
[[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), 
i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], 
i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } 
@llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e32ff.c index 7872d8226c1f67..7932e5ba6ac429 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } 
@llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } 
@llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e64ff.c index 76083f6a7117f5..f0cc0246fe8174 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vuint64m1x5_t 
test_vlseg5e64ff_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], 
align 8 @@ -88,7 +88,7 @@ vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e8ff.c index 046c34841108fb..90db070fe82abf 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 
[[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], 
align 8 @@ -127,7 +127,7 @@ vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 
5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e16ff.c index 94b2772d3338c1..8e15288728d567 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 
2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 
[[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e32ff.c index a642ef922db72e..7a0274850d5b86 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e32ff.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vlseg6e32ff_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t mas // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint32m1x6_t 
test_vlseg6e32ff_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e64ff.c index 2115568670ef4a..4cfa72df5c022f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 
[[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e8ff.c index 854f4f2c05181e..17155282e1938b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 
6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -400,7 +400,7 @@ vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedo
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf2x6_mu
 // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -413,7 +413,7 @@ vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedo
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8m1x6_mu
 // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e16ff.c
index 3b465f0a7d68a5..13998a359a01fb 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e16ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e16ff.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf4x7_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -23,7 +23,7 @@ vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple,
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf2x7_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -36,7 +36,7 @@ vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple,
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16m1x7_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -49,7 +49,7 @@ vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, con
 // CHECK-RV64-LABEL: define dso_local
target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr 
noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vlseg7e16ff_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_mu(vbool16_t 
mask, vfloat16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ 
vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], 
ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e32ff.c index 28d39419f3195e..42ea055a90817f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store 
i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), 
i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), 
i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 
1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e64ff.c index d97e281d51f8f3..315b7a1d05d02f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } 
@llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e8ff.c index 6dbe69b968d9a4..464ea2a2fc53b1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf2x7_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } 
@llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vlseg7e8ff_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedo // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e16ff.c index 20560db1cb77df..7fbfa4c78af39f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 
@@ vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vlseg8e16ff_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tum(vbool16_t 
mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ 
vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 
[[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 
8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", 
, 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 
1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e32ff.c index b970eae6c77151..643440812419b5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], 
i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } 
@llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 
[[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } 
@llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e64ff.c index 4eef00d4c9cf58..eb0c5910e6d6b7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", 
, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_u64m1x8_tumu // CHECK-RV64-SAME: ( 
[[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vlseg8e64ff_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e8ff.c index db9c6f9c211ede..e1bce794c6df78 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } 
[[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8m1x8_tum // CHECK-RV64-SAME: ( 
[[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vlseg8e8ff_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tumu(vbool8_t mask, 
vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ 
vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei16.c index 2f02aaabffd435..0578d07a888292 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) 
[[TMP0]] // vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei16_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, 
size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t 
*base, vuint16m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t 
test_vluxseg2ei16_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei16_v_f16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m2x2_tum // CHECK-RV64-SAME: ( 
[[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1650,7 +1650,7 @@ vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1660,7 +1660,7 @@ vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1670,7 +1670,7 @@ vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1680,7 +1680,7 @@ vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1690,7 +1690,7 @@ vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1700,7 +1700,7 @@ vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1710,7 +1710,7 @@ vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1720,7 +1720,7 @@ vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1730,7 +1730,7 @@ vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1740,7 +1740,7 @@ vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -1750,7 +1750,7 @@ vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1760,7 +1760,7 @@ vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1770,7 +1770,7 @@ vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1780,7 +1780,7 @@ vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1790,7 +1790,7 @@ vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1800,7 +1800,7 @@ vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1810,7 +1810,7 @@ vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1820,7 +1820,7 @@ vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1830,7 +1830,7 @@ vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1840,7 +1840,7 @@ vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1850,7 +1850,7 @@ vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1860,7 +1860,7 @@ vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1870,7 +1870,7 @@ vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1880,7 +1880,7 @@ vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1890,7 +1890,7 @@ vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1900,7 +1900,7 @@ vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1910,7 +1910,7 @@ vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -1920,7 +1920,7 @@ vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_mu(vbool32_t maske
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m4x2_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei32.c
index 43b6856feb36af..f0e759c4e82b2d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei32.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf4x2_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf2x2_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2)
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // 
vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m4x2_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) 
[[TMP0]] // vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m4x2_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 
2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei32_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint32m1x2_t 
test_vluxseg2ei32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1650,7 +1650,7 @@ vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1660,7 +1660,7 @@ vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1670,7 +1670,7 @@ vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -1680,7 +1680,7 @@ vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1690,7 +1690,7 @@ vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1700,7 +1700,7 @@ vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1710,7 +1710,7 @@ vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1720,7 +1720,7 @@ vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1730,7 +1730,7 @@ vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1740,7 +1740,7 @@ vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1750,7 +1750,7 @@ vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1760,7 +1760,7 @@ vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1770,7 +1770,7 @@ vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1780,7 +1780,7 @@ vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1790,7 +1790,7 @@ vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1800,7 +1800,7 @@ vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1810,7 +1810,7 @@ vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1820,7 +1820,7 @@ vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1830,7 +1830,7 @@ vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1840,7 +1840,7 @@ vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei64.c index e6a92ca455f63f..76a315b9391961 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tu(vfloat32m4x2_t 
maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t 
vl) { @@ -190,7 +190,7 @@ vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t 
vl) { @@ -290,7 +290,7 @@ vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, 
const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei8.c index dd1a26e6766528..73729d3f24dacb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf4x2_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", 
, 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei8_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tum(vbool32_t mask, 
vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, 
size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t 
test_vluxseg2ei8_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf2x2_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 
2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1650,7 +1650,7 @@ vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1660,7 +1660,7 @@ vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1670,7 +1670,7 @@ vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1680,7 +1680,7 @@ vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1690,7 +1690,7 @@ vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1700,7 +1700,7 @@ vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1710,7 +1710,7 @@ vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1720,7 +1720,7 @@ vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1730,7 +1730,7 @@ vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1740,7 +1740,7 @@ vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1750,7 +1750,7 @@ vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1760,7 +1760,7 @@ vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1770,7 +1770,7 @@ vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1780,7 +1780,7 @@ vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1790,7 +1790,7 @@ vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1800,7 +1800,7 @@ vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1810,7 +1810,7 @@ vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1820,7 +1820,7 @@ vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1830,7 +1830,7 @@ vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1840,7 +1840,7 @@ vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1850,7 +1850,7 @@ vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1860,7 +1860,7 @@ vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1870,7 +1870,7 @@ vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1880,7 +1880,7 @@ vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1890,7 +1890,7 @@ vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1900,7 +1900,7 @@ vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1910,7 +1910,7 @@ vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1920,7 +1920,7 @@ vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei16.c index dcca2773cf6a8e..49426d4000aec2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t 
test_vluxseg3ei16_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf8x3_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // 
vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m1x3_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei16_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t 
test_vluxseg3ei16_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t 
maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ 
-460,7 +460,7 @@ vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tum(vbool32_t mask, 
vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei16_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
 //
 vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei32.c
index 50144a47f07127..c860976a327f57 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei32.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf4x3_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
 //
 vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple,
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf2x3_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
 //
 vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
@@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple,
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m1x3_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 //
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, 
size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, 
size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const 
uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // 
vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t 
maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ 
vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t 
masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei32_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei64.c index be427b31c37075..e3ebbfbb5ae90a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, 
size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t 
vl) { @@ -200,7 +200,7 @@ vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t 
bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
 //
 vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
@@ -520,7 +520,7 @@ vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t masked
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m2x3_tum
 // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
 //
 vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) {
@@ -530,7 +530,7 @@ vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedo
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32mf2x3_tum
 // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
 //
 vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
@@ -540,7 +540,7 @@ vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mas
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m1x3_tum
 // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3)
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
 //
 vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
@@ -550,7 +550,7 @@ vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t masked
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m2x3_tum
 // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
 //
 vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) {
@@ -560,7 +560,7 @@ vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t masked
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m1x3_tum
 // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]]
 //
 vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
@@ -570,7 +570,7 @@ vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t masked
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m2x3_tum
 // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3)
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei8.c index adf0a87100388e..c10e39bc7fa85f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei8_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 
3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei16.c index 148649cb646979..7b5ee95f459c21 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tu(vfloat32m2x4_t 
maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", 
, 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tu(vint16m1x4_t 
maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, 
size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, 
const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ 
-410,7 +410,7 @@ vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t 
test_vluxseg4ei16_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t ma // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei16_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei32.c index 24087a19fba504..fa13477cbae8e2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei32_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tu(vint8mf8x4_t 
maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t 
vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t 
bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei64.c index 2ec9dc591aff2b..12ba944cde2127 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t 
test_vluxseg4ei64_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf8x4_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // 
vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m2x4_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // 
vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m2x4_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei8.c index df5720ce1c427f..f5d0c3eb8edb33 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei8_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 
4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei16.c index 8bdf6196da7ff6..f2ae4592ada790 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tu(vint8mf8x5_t 
maskedoff_tuple, const
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei16_v_i8mf4x5_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
 //
 vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei16_v_i8mf2x5_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 4 x i16> [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 4 x i16> [[BINDEX]], i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
 //
 vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
@@ -100,7 +100,7 @@ vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei16_v_i8m1x5_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 8 x i16> [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 8 x i16> [[BINDEX]], i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
 //
 vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
@@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei16_v_i16mf4x5_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
 //
 vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, con
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei16_v_i16mf2x5_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
 //
 vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, con
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei16_v_i16m1x5_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 4 x i16> [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 4 x i16> [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
 //
 vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
@@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei16_v_i32mf2x5_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]], i64 5)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
 //
 vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, con
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei16_v_i32m1x5_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]], i64 5)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
 //
 vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei16_v_i64m1x5_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]], i64 6)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
 //
 vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 1 x i8>, 5) @test_vluxseg5ei16_v_u8mf8x5_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 1 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 1 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 1 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 1 x i8>, 5) [[TMP0]]
 //
 vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, cons
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei16_v_u8mf4x5_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
 //
 vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, cons
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei16_v_u8mf2x5_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 4 x i16> [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 4 x i16> [[BINDEX]], i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
 //
 vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
@@ -200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, cons
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei16_v_u8m1x5_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 8 x i16> [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 8 x i16> [[BINDEX]], i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
 //
 vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
@@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const u
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei16_v_u16mf4x5_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
 //
 vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, c
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei16_v_u16mf2x5_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
 //
 vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, c
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei16_v_u16m1x5_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 4 x i16> [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 4 x i16> [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
 //
 vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
@@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, cons
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei16_v_u32mf2x5_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]], i64 5)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
 //
 vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, c
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei16_v_u32m1x5_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]], i64 5)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
 //
 vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, cons
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei16_v_u64m1x5_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]], i64 6)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
 //
 vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, cons
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei16_v_f16mf4x5_tum
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2, i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
 //
vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tum(vbool64_t 
mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, 
size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tum(vbool8_t mask, 
vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf8x5_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei32.c index 3d7f11ee9903fd..cda24727bdd894 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t 
test_vluxseg5ei32_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf2x5_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", 
, 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vluxseg5ei32_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei64.c index dcb417756b0220..cb9216a7b30866 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tu(vuint8mf4x5_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // 
vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t 
maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ 
vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t 
mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vluxseg5ei64_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei8.c index e85c8a07b8b6fb..07fc7b954f4d06 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 
@@ vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ 
-200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t 
test_vluxseg5ei8_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t 
test_vluxseg5ei8_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf2x5_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", 
, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", 
, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t 
test_vluxseg5ei8_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei16.c index 7d547782832bc1..02ed65e7435c2b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, 
vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const 
int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t 
test_vluxseg6ei16_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vluxseg6ei16_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei32.c index ade2f409d50f93..2a4901e7d1f2eb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t 
test_vluxseg6ei32_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf2x6_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", 
, 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vluxseg6ei32_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei64.c index f7f5b3a6d158e2..09c15b9ca49c10 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tu(vuint8mf4x6_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // 
vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t 
maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ 
vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t 
mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vluxseg6ei64_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei8.c index 33e962badb5d14..a43bc110aa5fbf 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 
@@ vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ 
-200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t 
test_vluxseg6ei8_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t 
test_vluxseg6ei8_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf2x6_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", 
, 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", 
, 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t 
test_vluxseg6ei8_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei16.c index 8391a03136229c..542c0616d7d8bd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, 
vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const 
int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t 
test_vluxseg7ei16_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vluxseg7ei16_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei32.c index b2acbddd2e66e6..a2dc98132d87d1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t 
test_vluxseg7ei32_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf2x7_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", 
, 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vluxseg7ei32_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
//
vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
@@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maske
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u64m1x7_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
//
vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei64.c
index aaf383a087458a..962f059ba08ac7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei64.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf4x7_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
//
vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf2x7_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7)
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tu(vuint8mf4x7_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // 
vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t 
maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ 
vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t 
mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vluxseg7ei64_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei8.c index 563c4f38e7ae34..655a310793dffa 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 
@@ vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ 
-200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t 
test_vluxseg7ei8_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t 
test_vluxseg7ei8_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf2x7_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", 
, 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", 
, 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t 
test_vluxseg7ei8_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei16.c index e7cfac51fdfcc6..d5f9039a328e0b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, 
vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const 
int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t 
test_vluxseg8ei16_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vluxseg8ei16_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei32.c index 0e7e0ab7f3d89d..0804c874a0ac48 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t 
test_vluxseg8ei32_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf2x8_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", 
, 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vluxseg8ei32_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei64.c index ec78fa6068329e..fc5103356d289b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tu(vuint8mf4x8_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // 
vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t 
maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ 
vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t 
mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vluxseg8ei64_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei8.c index 7521260ff8d853..46b0f3cf3771e8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 
@@ vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ 
-200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t 
test_vluxseg8ei8_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t 
test_vluxseg8ei8_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf2x8_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", 
, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", 
, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t 
test_vluxseg8ei8_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg2ei16.c index b39a55d7720eaa..5815a76a43ceb0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg2ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf4x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, @@ -23,7 +23,7 @@ vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf2x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, @@ -35,7 +35,7 @@ vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m1x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, @@ -47,7 +47,7 @@ vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m2x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, @@ -59,7 +59,7 @@ vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m4x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, @@ -71,7 +71,7 @@ vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf4x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tum(vbool64_t vm, @@ -85,7 +85,7 @@ vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf2x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tum(vbool32_t vm, @@ -99,7 +99,7 @@ vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m1x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tum(vbool16_t vm, @@ -112,7 +112,7 @@ vbfloat16m1x2_t 
test_vloxseg2ei16_v_bf16m1x2_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m2x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tum(vbool8_t vm, @@ -125,7 +125,7 @@ vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tum(vbool8_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m4x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tum(vbool4_t vm, @@ -138,7 +138,7 @@ vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tum(vbool4_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf4x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tumu(vbool64_t vm, @@ -152,7 +152,7 @@ vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf2x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], 
[[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tumu(vbool32_t vm, @@ -166,7 +166,7 @@ vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m1x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tumu(vbool16_t vm, @@ -179,7 +179,7 @@ vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m2x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tumu(vbool8_t vm, @@ -192,7 +192,7 @@ vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tumu(vbool8_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m4x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tumu(vbool4_t vm, @@ -205,7 +205,7 @@ vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tumu(vbool4_t vm, // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf4x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_mu(vbool64_t vm, @@ -218,7 +218,7 @@ vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf2x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_mu(vbool32_t vm, @@ -231,7 +231,7 @@ vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m1x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_mu(vbool16_t vm, @@ -244,7 +244,7 @@ vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_mu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m2x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, @@ -256,7 +256,7 @@ vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m4x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg3ei16.c index f6a46e633fd95c..711167c67ecce3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg3ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf4x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, @@ -23,7 +23,7 @@ vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf2x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, @@ -35,7 +35,7 @@ vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m1x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, @@ -47,7 +47,7 @@ vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m2x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, @@ -59,7 +59,7 @@ vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf4x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tum(vbool64_t vm, @@ -73,7 +73,7 @@ vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf2x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tum(vbool32_t vm, @@ -87,7 +87,7 @@ vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m1x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tum(vbool16_t vm, @@ -100,7 +100,7 @@ vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m2x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tum(vbool8_t vm, @@ -113,7 +113,7 @@ vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tum(vbool8_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf4x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tumu(vbool64_t 
vm, @@ -127,7 +127,7 @@ vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf2x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tumu(vbool32_t vm, @@ -141,7 +141,7 @@ vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m1x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tumu(vbool16_t vm, @@ -154,7 +154,7 @@ vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m2x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tumu(vbool8_t vm, @@ -167,7 +167,7 @@ vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tumu(vbool8_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf4x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_mu(vbool64_t vm, @@ -180,7 +180,7 @@ vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf2x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_mu(vbool32_t vm, @@ -193,7 +193,7 @@ vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m1x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_mu(vbool16_t vm, @@ -206,7 +206,7 @@ vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_mu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m2x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_mu(vbool8_t vm, 
vbfloat16m2x3_t vd, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg4ei16.c index fa40e5c116ca89..45af79a960e916 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg4ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf4x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, @@ -23,7 +23,7 @@ vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf2x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, @@ -35,7 +35,7 @@ vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m1x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, @@ -47,7 +47,7 @@ vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m2x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, @@ -59,7 +59,7 @@ vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf4x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tum(vbool64_t vm, @@ -73,7 +73,7 @@ vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf2x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tum(vbool32_t vm, @@ -87,7 +87,7 @@ vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m1x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tum(vbool16_t vm, @@ -100,7 +100,7 @@ vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m2x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tum(vbool8_t vm, @@ -113,7 +113,7 @@ vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tum(vbool8_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf4x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tumu(vbool64_t vm, @@ -127,7 +127,7 @@ vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf2x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tumu(vbool32_t vm, @@ -141,7 +141,7 @@ vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m1x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tumu(vbool16_t vm, @@ -154,7 +154,7 @@ vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m2x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tumu(vbool8_t vm, @@ -167,7 +167,7 @@ vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tumu(vbool8_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf4x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_mu(vbool64_t vm, @@ -180,7 +180,7 @@ vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf2x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t 
test_vloxseg4ei16_v_bf16mf2x4_mu(vbool32_t vm, @@ -193,7 +193,7 @@ vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m1x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_mu(vbool16_t vm, @@ -206,7 +206,7 @@ vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_mu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m2x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg5ei16.c index c0dee3d304aa7f..a7ea11363d5de2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg5ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf4x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, @@ -23,7 +23,7 @@ vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf2x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, @@ -35,7 +35,7 @@ vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16m1x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, @@ -47,7 +47,7 @@ vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf4x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tum(vbool64_t vm, @@ -61,7 +61,7 @@ vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf2x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tum(vbool32_t vm, @@ -75,7 +75,7 @@ vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16m1x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tum(vbool16_t vm, @@ -88,7 +88,7 @@ vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf4x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tumu(vbool64_t vm, @@ -102,7 +102,7 @@ vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf2x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tumu(vbool32_t vm, @@ -116,7 +116,7 @@ vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16m1x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], 
target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tumu(vbool16_t vm, @@ -129,7 +129,7 @@ vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf4x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_mu(vbool64_t vm, @@ -142,7 +142,7 @@ vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf2x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_mu(vbool32_t vm, @@ -155,7 +155,7 @@ vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16m1x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_mu(vbool16_t vm, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg6ei16.c index abc424729a2d4d..21130591c2db8d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg6ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf4x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, @@ -23,7 +23,7 @@ vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf2x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, @@ -35,7 +35,7 @@ vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16m1x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, @@ -47,7 +47,7 @@ 
vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf4x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tum(vbool64_t vm, @@ -61,7 +61,7 @@ vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf2x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tum(vbool32_t vm, @@ -75,7 +75,7 @@ vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16m1x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tum(vbool16_t vm, @@ -88,7 +88,7 @@ vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf4x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr 
[[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tumu(vbool64_t vm, @@ -102,7 +102,7 @@ vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf2x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tumu(vbool32_t vm, @@ -116,7 +116,7 @@ vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16m1x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tumu(vbool16_t vm, @@ -129,7 +129,7 @@ vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf4x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_mu(vbool64_t vm, @@ -142,7 +142,7 @@ vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_mu(vbool64_t vm, // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf2x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_mu(vbool32_t vm, @@ -155,7 +155,7 @@ vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16m1x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_mu(vbool16_t vm, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg7ei16.c index faa2e2266ac917..e48c8738f1e0a8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg7ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf4x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, @@ -23,7 +23,7 @@ vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf2x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, @@ -35,7 +35,7 @@ vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16m1x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, @@ -47,7 +47,7 @@ vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf4x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tum(vbool64_t vm, @@ -61,7 +61,7 @@ vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf2x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t 
test_vloxseg7ei16_v_bf16mf2x7_tum(vbool32_t vm, @@ -75,7 +75,7 @@ vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16m1x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tum(vbool16_t vm, @@ -88,7 +88,7 @@ vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf4x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tumu(vbool64_t vm, @@ -102,7 +102,7 @@ vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf2x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tumu(vbool32_t vm, @@ -116,7 +116,7 @@ vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16m1x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tumu(vbool16_t vm, @@ -129,7 +129,7 @@ vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf4x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_mu(vbool64_t vm, @@ -142,7 +142,7 @@ vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf2x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_mu(vbool32_t vm, @@ -155,7 +155,7 @@ vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16m1x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_mu(vbool16_t vm, 
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg8ei16.c
index 1cbbb6b56d6be4..3fdf404383311e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg8ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg8ei16.c
@@ -11,7 +11,7 @@
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf4x8_tu(
 // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]]
 //
 vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd,
@@ -23,7 +23,7 @@ vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd,
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf2x8_tu(
 // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]]
 //
 vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd,
@@ -35,7 +35,7 @@ vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd,
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16m1x8_tu(
 // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]]
 //
 vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tu(vbfloat16m1x8_t vd,
@@ -47,7 +47,7 @@ vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tu(vbfloat16m1x8_t vd,
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf4x8_tum(
 // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]]
 //
 vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tum(vbool64_t vm,
@@ -61,7 +61,7 @@ vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tum(vbool64_t vm,
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf2x8_tum(
 // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]]
 //
 vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tum(vbool32_t vm,
@@ -75,7 +75,7 @@ vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tum(vbool32_t vm,
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16m1x8_tum(
 // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]]
 //
 vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tum(vbool16_t vm,
@@ -88,7 +88,7 @@ vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tum(vbool16_t vm,
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf4x8_tumu(
 // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4)
 //
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tumu(vbool64_t vm, @@ -102,7 +102,7 @@ vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf2x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tumu(vbool32_t vm, @@ -116,7 +116,7 @@ vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16m1x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tumu(vbool16_t vm, @@ -129,7 +129,7 @@ vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf4x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_mu(vbool64_t vm, @@ -142,7 +142,7 @@ vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf2x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_mu(vbool32_t vm, @@ -155,7 +155,7 @@ vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16m1x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_mu(vbool16_t vm, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg2e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg2e16ff.c index f3889f115a8e68..c6f72916ac8e44 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg2e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg2e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf4x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -26,7 +26,7 @@ vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf2x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -41,7 +41,7 @@ vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m1x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -56,7 +56,7 @@ vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_tu(vbfloat16m1x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m2x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -71,7 +71,7 @@ vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_tu(vbfloat16m2x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m4x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 
4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -86,7 +86,7 @@ vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_tu(vbfloat16m4x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf4x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -102,7 +102,7 @@ vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf2x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -118,7 +118,7 @@ vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m1x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -134,7 +134,7 @@ vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m2x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -149,7 +149,7 @@ vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m4x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -164,7 +164,7 @@ vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf4x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], 
ptr [[NEW_VL]], align 8 @@ -180,7 +180,7 @@ vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf2x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -196,7 +196,7 @@ vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m1x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -212,7 +212,7 @@ vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m2x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -228,7 +228,7 @@ vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_tumu(vbool8_t vm, // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m4x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_tumu(vbool4_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf4x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -260,7 +260,7 @@ vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf2x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -276,7 +276,7 @@ vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m1x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) 
[[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -291,7 +291,7 @@ vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m2x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -306,7 +306,7 @@ vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m4x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg3e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg3e16ff.c index c1fc7f13d64cbc..a332d292f79009 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg3e16ff.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg3e16ff.c
@@ -11,7 +11,7 @@
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf4x3_tu(
 // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -26,7 +26,7 @@ vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd,
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf2x3_tu(
 // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -41,7 +41,7 @@ vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd,
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m1x3_tu(
 // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -56,7 +56,7 @@ vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_tu(vbfloat16m1x3_t vd,
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m2x3_tu(
 // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -71,7 +71,7 @@ vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_tu(vbfloat16m2x3_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf4x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -87,7 +87,7 @@ vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf2x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -103,7 +103,7 @@ vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m1x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -119,7 +119,7 @@ vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m2x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -134,7 +134,7 @@ vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf4x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -150,7 +150,7 @@ vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf2x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], 
[[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m1x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -182,7 +182,7 @@ vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m2x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -198,7 +198,7 @@ vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_tumu(vbool8_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf4x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -214,7 +214,7 @@ vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf2x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -230,7 +230,7 @@ vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m1x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -245,7 +245,7 @@ vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m2x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 
[[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg4e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg4e16ff.c index fae68f7c9f3607..004c75f9db6db7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg4e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg4e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf4x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -26,7 +26,7 @@ vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf2x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -41,7 +41,7 @@ vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m1x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -56,7 +56,7 @@ vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_tu(vbfloat16m1x4_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m2x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -71,7 +71,7 @@ vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_tu(vbfloat16m2x4_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf4x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -87,7 +87,7 @@ vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf2x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 
4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -103,7 +103,7 @@ vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m1x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -119,7 +119,7 @@ vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m2x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -134,7 +134,7 @@ vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf4x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -150,7 +150,7 @@ vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf2x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m1x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -182,7 +182,7 @@ vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m2x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -198,7 +198,7 @@ vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_tumu(vbool8_t vm, 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf4x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -214,7 +214,7 @@ vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf2x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -230,7 +230,7 @@ vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m1x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -245,7 +245,7 @@ vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m2x4_mu( // CHECK-RV64-SAME: 
[[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg5e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg5e16ff.c index 3d98f55c39e214..764d77ede36890 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg5e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg5e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf4x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -26,7 +26,7 @@ vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf2x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -41,7 +41,7 @@ vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16m1x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -56,7 +56,7 @@ vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_tu(vbfloat16m1x5_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf4x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -72,7 +72,7 @@ vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf2x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16m1x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -104,7 +104,7 @@ vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf4x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -120,7 +120,7 @@ vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf2x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -136,7 +136,7 @@ vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16m1x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -152,7 +152,7 @@ vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf4x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -168,7 +168,7 @@ vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf2x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -184,7 +184,7 @@ vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16m1x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[VD]], ptr 
[[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg6e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg6e16ff.c index 75ace1b4806e0f..3e62a07c57937e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg6e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg6e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf4x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -26,7 +26,7 @@ vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf2x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -41,7 +41,7 @@ vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16m1x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -56,7 +56,7 @@ vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_tu(vbfloat16m1x6_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf4x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -72,7 +72,7 @@ vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf2x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16m1x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -104,7 +104,7 @@ vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf4x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -120,7 +120,7 @@ vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf2x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -136,7 +136,7 @@ vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16m1x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) 
[[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -152,7 +152,7 @@ vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf4x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -168,7 +168,7 @@ vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf2x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -184,7 +184,7 @@ vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16m1x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } 
[[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg7e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg7e16ff.c index ee59df4dd9b170..931e93c18b2571 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg7e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg7e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf4x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -26,7 +26,7 @@ vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf2x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -41,7 +41,7 @@ vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16m1x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -56,7 +56,7 @@ vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_tu(vbfloat16m1x7_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf4x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -72,7 +72,7 @@ vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf2x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16m1x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 
7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -104,7 +104,7 @@ vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf4x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -120,7 +120,7 @@ vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf2x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -136,7 +136,7 @@ vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16m1x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -152,7 +152,7 @@ vbfloat16m1x7_t 
test_vlseg7e16ff_v_bf16m1x7_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf4x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -168,7 +168,7 @@ vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf2x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -184,7 +184,7 @@ vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16m1x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg8e16ff.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg8e16ff.c index 3ccc427f641631..214253e9b619a3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg8e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg8e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf4x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -26,7 +26,7 @@ vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf2x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -41,7 +41,7 @@ vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16m1x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -56,7 +56,7 @@ vbfloat16m1x8_t 
test_vlseg8e16ff_v_bf16m1x8_tu(vbfloat16m1x8_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf4x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -72,7 +72,7 @@ vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf2x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16m1x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -104,7 +104,7 @@ vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vlseg8e16ff_v_bf16mf4x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -120,7 +120,7 @@ vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf2x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -136,7 +136,7 @@ vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16m1x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -152,7 +152,7 @@ vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf4x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr 
noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -168,7 +168,7 @@ vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf2x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -184,7 +184,7 @@ vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16m1x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg2ei16.c index 9c510fa3e7a274..5b589b410311b6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg2ei16.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg2ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf4x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, @@ -23,7 +23,7 @@ vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf2x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, @@ -35,7 +35,7 @@ vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m1x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, @@ -47,7 +47,7 @@ vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m2x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, @@ -59,7 +59,7 @@ vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m4x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, @@ -71,7 +71,7 @@ vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf4x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tum(vbool64_t vm, @@ -85,7 +85,7 @@ vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf2x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tum(vbool32_t vm, @@ -99,7 +99,7 @@ vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m1x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tum(vbool16_t vm, @@ -112,7 +112,7 @@ vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m2x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tum(vbool8_t vm, @@ -125,7 +125,7 @@ vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tum(vbool8_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m4x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tum(vbool4_t vm, @@ -138,7 +138,7 @@ vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tum(vbool4_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf4x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tumu(vbool64_t vm, @@ -152,7 +152,7 @@ vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf2x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tumu(vbool32_t vm, @@ -166,7 +166,7 @@ vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m1x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tumu(vbool16_t vm, @@ -179,7 +179,7 @@ vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m2x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tumu(vbool8_t vm, @@ -192,7 +192,7 @@ vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tumu(vbool8_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m4x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tumu(vbool4_t vm, @@ -205,7 +205,7 @@ vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tumu(vbool4_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf4x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_mu(vbool64_t vm, @@ -218,7 +218,7 @@ vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf2x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_mu(vbool32_t vm, @@ -231,7 +231,7 @@ vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m1x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t 
test_vluxseg2ei16_v_bf16m1x2_mu(vbool16_t vm, @@ -244,7 +244,7 @@ vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_mu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m2x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, @@ -256,7 +256,7 @@ vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m4x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg3ei16.c index 90652e820bcf61..6d2583ed02a8ee 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg3ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf4x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, @@ -23,7 +23,7 @@ vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf2x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, @@ -35,7 +35,7 @@ vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m1x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, @@ -47,7 +47,7 @@ vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m2x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, @@ -59,7 +59,7 @@ vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf4x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], 
[[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tum(vbool64_t vm, @@ -73,7 +73,7 @@ vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf2x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tum(vbool32_t vm, @@ -87,7 +87,7 @@ vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m1x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tum(vbool16_t vm, @@ -100,7 +100,7 @@ vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m2x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tum(vbool8_t vm, @@ -113,7 +113,7 @@ vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tum(vbool8_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf4x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tumu(vbool64_t vm, @@ -127,7 +127,7 @@ vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf2x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tumu(vbool32_t vm, @@ -141,7 +141,7 @@ vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m1x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tumu(vbool16_t vm, @@ -154,7 +154,7 @@ vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m2x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tumu(vbool8_t vm, @@ -167,7 +167,7 @@ vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tumu(vbool8_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf4x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_mu(vbool64_t vm, @@ -180,7 +180,7 @@ vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf2x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_mu(vbool32_t vm, @@ -193,7 +193,7 @@ vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m1x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_mu(vbool16_t vm, @@ -206,7 +206,7 @@ vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_mu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m2x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg4ei16.c index df22e26960a3e3..886d35f4f70a35 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg4ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf4x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, @@ -23,7 +23,7 @@ vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf2x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, @@ -35,7 +35,7 @@ vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m1x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, @@ -47,7 +47,7 @@ vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m2x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, @@ -59,7 +59,7 @@ vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf4x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tum(vbool64_t vm, @@ -73,7 +73,7 @@ vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf2x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tum(vbool32_t vm, @@ -87,7 +87,7 @@ vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m1x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tum(vbool16_t vm, @@ -100,7 +100,7 @@ vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m2x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tum(vbool8_t vm, @@ -113,7 +113,7 @@ vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tum(vbool8_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf4x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tumu(vbool64_t vm, @@ -127,7 +127,7 @@ vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf2x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tumu(vbool32_t vm, @@ -141,7 +141,7 @@ vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m1x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tumu(vbool16_t vm, @@ -154,7 +154,7 @@ vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m2x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tumu(vbool8_t vm, @@ -167,7 +167,7 @@ vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tumu(vbool8_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf4x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_mu(vbool64_t vm, @@ -180,7 +180,7 @@ vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf2x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_mu(vbool32_t vm, @@ -193,7 +193,7 @@ vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m1x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_mu(vbool16_t vm, @@ -206,7 +206,7 @@ vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_mu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m2x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg5ei16.c index cacd851f5e4e27..4223e926e07041 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg5ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf4x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], 
[[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, @@ -23,7 +23,7 @@ vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf2x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, @@ -35,7 +35,7 @@ vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16m1x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, @@ -47,7 +47,7 @@ vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf4x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tum(vbool64_t vm, @@ -61,7 +61,7 @@ vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf2x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], 
ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tum(vbool32_t vm, @@ -75,7 +75,7 @@ vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16m1x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tum(vbool16_t vm, @@ -88,7 +88,7 @@ vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf4x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tumu(vbool64_t vm, @@ -102,7 +102,7 @@ vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf2x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr 
[[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tumu(vbool32_t vm, @@ -116,7 +116,7 @@ vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16m1x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tumu(vbool16_t vm, @@ -129,7 +129,7 @@ vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf4x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_mu(vbool64_t vm, @@ -142,7 +142,7 @@ vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf2x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_mu(vbool32_t vm, @@ -155,7 +155,7 @@ vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16m1x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_mu(vbool16_t vm, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg6ei16.c index 4754e8bb954b9d..7eb149ea1bb104 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg6ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf4x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, @@ -23,7 +23,7 @@ vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf2x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, @@ -35,7 +35,7 @@ vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16m1x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, @@ -47,7 +47,7 @@ vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf4x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tum(vbool64_t vm, @@ -61,7 +61,7 @@ vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf2x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tum(vbool32_t vm, @@ -75,7 +75,7 @@ vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16m1x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tum(vbool16_t vm, @@ -88,7 +88,7 @@ vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf4x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], 
target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tumu(vbool64_t vm, @@ -102,7 +102,7 @@ vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf2x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tumu(vbool32_t vm, @@ -116,7 +116,7 @@ vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16m1x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tumu(vbool16_t vm, @@ -129,7 +129,7 @@ vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf4x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_mu(vbool64_t vm, @@ -142,7 +142,7 @@ vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf2x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_mu(vbool32_t vm, @@ -155,7 +155,7 @@ vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16m1x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_mu(vbool16_t vm, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg7ei16.c index 80fda3c73155f2..4ae7f446f511f5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg7ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf4x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, @@ -23,7 +23,7 @@ vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf2x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, @@ -35,7 +35,7 @@ vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16m1x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, @@ -47,7 +47,7 @@ vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf4x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tum(vbool64_t vm, @@ -61,7 +61,7 @@ vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf2x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tum(vbool32_t vm, @@ -75,7 +75,7 @@ vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16m1x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tum(vbool16_t vm, @@ -88,7 +88,7 @@ vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf4x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tumu(vbool64_t vm, @@ -102,7 +102,7 @@ vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf2x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t 
test_vluxseg7ei16_v_bf16mf2x7_tumu(vbool32_t vm, @@ -116,7 +116,7 @@ vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16m1x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tumu(vbool16_t vm, @@ -129,7 +129,7 @@ vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf4x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_mu(vbool64_t vm, @@ -142,7 +142,7 @@ vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf2x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_mu(vbool32_t vm, @@ -155,7 +155,7 @@ vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16m1x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_mu(vbool16_t vm, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg8ei16.c index 930ab0a28b93ac..9a8c076d6cc606 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg8ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf4x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, @@ -23,7 +23,7 @@ vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf2x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, @@ -35,7 +35,7 @@ vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16m1x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, @@ -47,7 +47,7 @@ vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf4x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tum(vbool64_t vm, @@ -61,7 +61,7 @@ vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf2x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tum(vbool32_t vm, @@ -75,7 +75,7 @@ vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16m1x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tum(vbool16_t vm, @@ -88,7 +88,7 @@ vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf4x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) 
[[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tumu(vbool64_t vm, @@ -102,7 +102,7 @@ vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf2x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tumu(vbool32_t vm, @@ -116,7 +116,7 @@ vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16m1x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tumu(vbool16_t vm, @@ -129,7 +129,7 @@ vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf4x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", 
, 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_mu(vbool64_t vm, @@ -142,7 +142,7 @@ vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf2x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_mu(vbool32_t vm, @@ -155,7 +155,7 @@ vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16m1x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_mu(vbool16_t vm, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei16.c index 6aa43b1375cba1..cd47648203e913 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tu(vfloat16mf4x2_t 
maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // 
vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf2x2_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // 
vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m1x2_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", 
, 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m2x2_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t 
test_vloxseg2ei16_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1650,7 +1650,7 @@ vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1660,7 +1660,7 @@ vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1670,7 +1670,7 @@ vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1680,7 +1680,7 @@ vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1690,7 +1690,7 @@ vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1700,7 +1700,7 @@ vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1710,7 +1710,7 @@ vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1720,7 +1720,7 @@ vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1730,7 +1730,7 @@ vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1740,7 +1740,7 @@ vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -1750,7 +1750,7 @@ vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1760,7 +1760,7 @@ vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1770,7 +1770,7 @@ vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1780,7 +1780,7 @@ vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1790,7 +1790,7 @@ vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1800,7 +1800,7 @@ vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1810,7 +1810,7 @@ vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1820,7 +1820,7 @@ vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1830,7 +1830,7 @@ vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1840,7 +1840,7 @@ vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1850,7 +1850,7 @@ vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1860,7 +1860,7 @@ vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1870,7 +1870,7 @@ vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1880,7 +1880,7 @@ vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1890,7 +1890,7 @@ vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1900,7 +1900,7 @@ vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1910,7 +1910,7 @@ vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1920,7 +1920,7 @@ vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei32.c index c414acc739f659..61e2694d2b9ec6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, 
size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t 
*base, vuint32m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const 
int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t 
test_vloxseg2ei32_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m4x2_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1650,7 +1650,7 @@ vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1660,7 +1660,7 @@ vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1670,7 +1670,7 @@ vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -1680,7 +1680,7 @@ vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1690,7 +1690,7 @@ vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1700,7 +1700,7 @@ vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1710,7 +1710,7 @@ vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1720,7 +1720,7 @@ vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1730,7 +1730,7 @@ vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1740,7 +1740,7 @@ vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1750,7 +1750,7 @@ vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1760,7 +1760,7 @@ vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1770,7 +1770,7 @@ vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1780,7 +1780,7 @@ vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1790,7 +1790,7 @@ vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1800,7 +1800,7 @@ vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1810,7 +1810,7 @@ vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1820,7 +1820,7 @@ vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1830,7 +1830,7 @@ vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1840,7 +1840,7 @@ vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei64.c index de7b96aae04472..3960e8209b6076 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tu(vfloat32m1x2_t 
maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tu(vint16mf4x2_t 
maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t 
test_vloxseg2ei64_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8m1x2_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
2) @test_vloxseg2ei64_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei8.c index f8d01d0fc45aec..57347d70ec1898 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, 
const float *base, vuint8mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, 
size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -290,7 
+290,7 @@ vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) 
{ @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t 
vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t 
test_vloxseg2ei8_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei8_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", 
, 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1650,7 +1650,7 @@ vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1660,7 +1660,7 @@ vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1670,7 +1670,7 @@ vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1680,7 +1680,7 @@ vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1690,7 +1690,7 @@ vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1700,7 +1700,7 @@ vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1710,7 +1710,7 @@ vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1720,7 +1720,7 @@ vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1730,7 +1730,7 @@ vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1740,7 +1740,7 @@ vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1750,7 +1750,7 @@ vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1760,7 +1760,7 @@ vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1770,7 +1770,7 @@ vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1780,7 +1780,7 @@ vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1790,7 +1790,7 @@ vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1800,7 +1800,7 @@ vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1810,7 +1810,7 @@ vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1820,7 +1820,7 @@ vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1830,7 +1830,7 @@ vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1840,7 +1840,7 @@ vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1850,7 +1850,7 @@ vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1860,7 +1860,7 @@ vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1870,7 +1870,7 @@ vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1880,7 +1880,7 @@ vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1890,7 +1890,7 @@ vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1900,7 +1900,7 @@ vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1910,7 +1910,7 @@ vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1920,7 +1920,7 @@ vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei16.c index f45747061d73e2..01b7d62ec654c1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tu(vfloat32m2x3_t 
maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", 
, 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tu(vint16m1x3_t 
maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, 
size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, 
const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ 
-410,7 +410,7 @@ vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t 
test_vloxseg3ei16_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t ma // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei16_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei32.c index 90767841fdcb97..0f6e3c3707ba40 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf4x3_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tu(vint8mf8x3_t 
maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t 
vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t 
bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei64.c index 1a7b4bedd2a2e6..5a509b71175cbe 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tu(vfloat32m1x3_t 
maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tu(vint16m1x3_t 
maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t 
test_vloxseg3ei64_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m2x3_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t 
test_vloxseg3ei64_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, 
const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x3_t 
test_vloxseg3ei64_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t masked // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei64_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei8.c index 333e7eee028cb3..dcc14036803a54 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, 
vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ 
-190,7 +190,7 @@ vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 
+290,7 @@ vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t 
test_vloxseg3ei8_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, 
const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ 
vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedo // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m2x3_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 
3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei16.c index 1cdc946d081d04..109f93c150eb69 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei16_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t 
vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // 
vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tum(vbool8_t 
mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t 
bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t 
test_vloxseg4ei16_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei16_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei32.c index ba84987fb9cae0..14f4f454054f5e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t 
test_vloxseg4ei32_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf2x4_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // 
vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m1x4_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", 
, 4) @test_vloxseg4ei32_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei64.c index f9cf85a37f9525..f1c971f4a2921f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tu(vfloat32m2x4_t 
maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 
4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tu(vint16m2x4_t 
maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, 
size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t 
test_vloxseg4ei64_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, 
const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x4_t 
test_vloxseg4ei64_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedo // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei64_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei8.c index 4dd1f8708e9333..72b0773de0cf29 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ 
-100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ 
-200,7 +200,7 @@ vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ 
-300,7 +300,7 @@ vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t 
test_vloxseg4ei8_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const 
double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t 
test_vloxseg4ei8_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m2x4_tum // CHECK-RV64-SAME: ( 
[[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", 
, 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei16.c index 5172dbf0a2b3ff..f40b1c02dfe178 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tu(vfloat64m1x5_t 
maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tu(vint64m1x5_t 
maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // 
vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t 
maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { 
@@ -330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tum(vbool16_t mask, 
vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i64m1x5_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei32.c index 5a4ebcb8f9e932..8565c80a26b7ee 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf4x5_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t 
vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei64.c index 8acc1c3670d3a3..eddda795a27bba 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, 
const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tu(vuint8mf8x5_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t 
test_vloxseg5ei64_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tum(vbool64_t mask, 
vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) 
{ @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t 
maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", 
, 5) @test_vloxseg5ei64_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]]
 //
 vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
@@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maske
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u64m1x5_mu
 // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]]
 //
 vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei8.c
index a54ef5a6d86ad0..359bf4bbf299f4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei8.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf4x5_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]]
 //
 vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple,
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf2x5_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]],
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t 
bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ 
-190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t 
test_vloxseg5ei8_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t 
test_vloxseg5ei8_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf4x5_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei16.c index b7afe98d2c6bbe..1b779ac9f5e599 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tu(vfloat64m1x6_t 
maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tu(vint64m1x6_t 
maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // 
vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t 
maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { 
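[Editorial note, not part of the generated diff] Every updated CHECK line in these hunks follows one pattern: the trailing SEW operand of the vloxseg/vluxseg/vsoxseg/vsuxseg and vlsegNeNNff intrinsic calls changes from the placeholder 0 to log2 of the data element width implied by the pointer argument (8-bit -> 3, 16-bit -> 4, 32-bit -> 5, 64-bit -> 6), independent of the index EEW encoded in the intrinsic name. The sketch below only illustrates that mapping; sew_operand_for_element_width is a hypothetical helper for this note, not a function in Clang or in this patch.

#include <assert.h>
#include <stdint.h>

/* Illustrative only: maps the data element width (in bits) of the segment
 * load/store's pointer type to the SEW operand that appears as the last
 * argument of the @llvm.riscv.v*seg* calls above (log2 of the width). */
static int64_t sew_operand_for_element_width(unsigned element_bits) {
  int64_t log2w = 0;
  while ((1u << log2w) < element_bits)
    ++log2w;
  return log2w; /* 8 -> 3, 16 -> 4, 32 -> 5, 64 -> 6 */
}

int main(void) {
  /* Matches the constants in the updated CHECK lines of these tests. */
  assert(sew_operand_for_element_width(8)  == 3); /* int8_t / uint8_t    */
  assert(sew_operand_for_element_width(16) == 4); /* _Float16 / int16_t  */
  assert(sew_operand_for_element_width(32) == 5); /* float / int32_t     */
  assert(sew_operand_for_element_width(64) == 6); /* double / int64_t    */
  return 0;
}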
@@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tum(vbool16_t mask, 
vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i64m1x6_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei32.c index 24216a4b54b10e..598b68d7758146 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf4x6_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t 
vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]]
 //
 vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t ma
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32m1x6_mu
 // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]]
 //
 vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
@@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maske
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u64m1x6_mu
 // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]]
 //
 vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei64.c
index 075e498b97e2f1..884468562aed9f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei64.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf4x6_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6)
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, 
const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tu(vuint8mf8x6_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t 
test_vloxseg6ei64_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tum(vbool64_t mask, 
vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) 
{ @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t 
maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", 
, 6) @test_vloxseg6ei64_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei8.c index 123cbe6496696b..1b0ed30ea445f6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t 
bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ 
-190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t 
test_vloxseg6ei8_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t 
test_vloxseg6ei8_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf4x6_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei16.c index b35e90251d0c35..f619f8ea91fbc9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tu(vfloat64m1x7_t 
maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tu(vint64m1x7_t 
maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // 
vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t 
maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { 
@@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tum(vbool16_t mask, 
vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i64m1x7_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei32.c index 3e4ef02fc1f785..854cb25da58f0e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf4x7_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t 
vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei64.c index 8e978cfd02719e..367afe9d88f9ed 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, 
const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tu(vuint8mf8x7_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t 
test_vloxseg7ei64_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tum(vbool64_t mask, 
vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) 
{ @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t 
maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", 
, 7) @test_vloxseg7ei64_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei8.c index 21c84239ffd6ca..058d9bbc72b72f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t 
bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ 
-190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t 
test_vloxseg7ei8_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t 
test_vloxseg7ei8_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf4x7_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei16.c index 28259081f8c0f5..8daad223e1691a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tu(vfloat64m1x8_t 
maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tu(vint64m1x8_t 
maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // 
vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t 
maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { 
@@ -330,7 +330,7 @@ vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tum(vbool16_t mask, 
vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i64m1x8_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei32.c index c29b2f767a5513..e798d230d8a9a2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf4x8_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t 
vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei64.c index cf79fb0e8d9619..1a784ba93af924 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, 
const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tu(vuint8mf8x8_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t 
test_vloxseg8ei64_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tum(vbool64_t mask, 
vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) 
{ @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t 
maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", 
, 8) @test_vloxseg8ei64_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei8.c index 37d390ba25aed6..a99d06a6068096 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t 
bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ 
-190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t 
test_vloxseg8ei8_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t 
test_vloxseg8ei8_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf4x8_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e16ff.c index a08e624f5de063..099f6de35bc9c1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vuint16m4x2_t 
test_vlseg2e16ff_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } 
[[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), 
i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] 
= extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 
[[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -478,7 +478,7 @@ vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -491,7 +491,7 @@ vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -504,7 +504,7 @@ vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -517,7 +517,7 @@ vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -530,7 +530,7 @@ vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -543,7 +543,7 @@ vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 
[[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -556,7 +556,7 @@ vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -569,7 +569,7 @@ vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -582,7 +582,7 @@ vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 
2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -595,7 +595,7 @@ vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -608,7 +608,7 @@ vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -621,7 +621,7 @@ vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -634,7 +634,7 @@ vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -647,7 +647,7 @@ vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -660,7 +660,7 @@ vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -673,7 +673,7 @@ vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -686,7 +686,7 @@ vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -699,7 +699,7 @@ vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -712,7 +712,7 @@ vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -725,7 +725,7 @@ vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -738,7 +738,7 @@ vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -751,7 +751,7 @@ vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -764,7 +764,7 @@ vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -777,7 +777,7 @@ vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e32ff.c
index 7f5b21eba8583f..4bdfcf3d59bfdb 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e32ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e32ff.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32mf2x2_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -23,7 +23,7 @@ vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m1x2_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -36,7 +36,7 @@ vfloat32m1x2_t
test_vlseg2e32ff_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m1x2_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m1x2_tumu // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tumu(vbool16_t mask, 
vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -478,7 +478,7 @@ vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -491,7 +491,7 @@ vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -504,7 +504,7 @@ vfloat32m1x2_t 
test_vlseg2e32ff_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -517,7 +517,7 @@ vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -530,7 +530,7 @@ vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -543,7 +543,7 @@ vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -556,7 +556,7 @@ vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -569,7 +569,7 @@ vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 
1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -582,7 +582,7 @@ vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -595,7 +595,7 @@ vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -608,7 +608,7 @@ vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -621,7 +621,7 @@ vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e64ff.c index e21b596ee03c42..5729254e5493d1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store 
i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", 
, 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 
6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 
1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e8ff.c index f4591392f15b3b..4377ce7f64b6da 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 
[[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 
0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -478,7 +478,7 @@ vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -491,7 +491,7 @@ vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -504,7 +504,7 @@ vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -517,7 +517,7 @@ vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -530,7 +530,7 @@ vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -543,7 +543,7 @@ vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -556,7 +556,7 @@ vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -569,7 +569,7 @@ vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -582,7 +582,7 @@ vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -595,7 +595,7 @@ vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -608,7 +608,7 @@ vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -621,7 +621,7 @@ vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e16ff.c index c82c81744616f6..1d673a456c3097 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: 
store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr 
noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf4x3_tumu // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tumu(vbool32_t mask, 
vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -478,7 +478,7 @@ vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -491,7 +491,7 @@ 
vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -504,7 +504,7 @@ vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -517,7 +517,7 @@ vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 
[[TMP2]], ptr [[NEW_VL]], align 8 @@ -530,7 +530,7 @@ vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -543,7 +543,7 @@ vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -556,7 +556,7 @@ vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 
} [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -569,7 +569,7 @@ vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -582,7 +582,7 @@ vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -595,7 +595,7 @@ vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -608,7 +608,7 @@ vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -621,7 +621,7 @@ vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e32ff.c index 42a4aa325de6ef..8c8c24294310c8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 
5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } 
[[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 
} [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 
[[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e64ff.c index e139dacba5aa07..aecfc5445659dd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 
2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e8ff.c index 84ff42ce5a04f8..3fc5c4bdbb2e03 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tu(vint8mf2x3_t 
maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vlseg3e8ff_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_mu(vbool16_t mask, 
vint8mf2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -478,7 +478,7 @@ vuint8mf8x3_t 
test_vlseg3e8ff_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -491,7 +491,7 @@ vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -504,7 +504,7 @@ vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], 
align 8 @@ -517,7 +517,7 @@ vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e16ff.c index e957dd7705f4ad..1250ab6ebb2c47 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } 
[[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const i 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf2x4_tum // CHECK-RV64-SAME: 
( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vlseg4e16ff_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vfloat16mf2x4_t 
test_vlseg4e16ff_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } 
[[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -478,7 +478,7 @@ vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -491,7 +491,7 @@ vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 
} [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -504,7 +504,7 @@ vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -517,7 +517,7 @@ vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -530,7 +530,7 @@ vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -543,7 +543,7 @@ vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -556,7 +556,7 @@ vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -569,7 +569,7 @@ vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 
1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -582,7 +582,7 @@ vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -595,7 +595,7 @@ vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -608,7 +608,7 @@ vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -621,7 +621,7 @@ vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e32ff.c index 173b8df4a9396b..4dc3cfd4d1880f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 
[[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 
[[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 
4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e64ff.c index cc720676a8e8e6..f4ed85abeba0e9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tu(vfloat64m1x4_t 
maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vlseg4e64ff_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mask // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_mu(vbool32_t 
mask, vint64m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e8ff.c index d16934af48a0c0..d3a9b995bcbf1f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vlseg4e8ff_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tumu(vbool32_t mask, 
vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint8m2x4_t 
test_vlseg4e8ff_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 
1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -478,7 +478,7 @@ vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -491,7 +491,7 @@ vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -504,7 +504,7 @@ vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -517,7 +517,7 @@ vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e16ff.c index 99969b08168b7d..0a520d75d5cfae 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } 
@llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) 
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } 
@llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 
1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e32ff.c
index fa7c0bb1c6115d..f9c0f7f153b395 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e32ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e32ff.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32mf2x5_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -23,7 +23,7 @@ vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32m1x5_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -36,7 +36,7 @@ vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, con
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32mf2x5_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]])
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } 
@llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } 
@llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32m1x5_mu // CHECK-RV64-SAME: ( 
[[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e64ff.c
index 4d2e08e5dfc258..9bc778dfd3a738 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e64ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e64ff.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_f64m1x5_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -23,7 +23,7 @@ vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, con
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_i64m1x5_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]],
align 8 @@ -36,7 +36,7 @@ vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ 
vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 
[[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 
} [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -153,7 +153,7 @@ vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedof
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_u64m1x5_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e8ff.c
index 3494e012e929c2..8bbfe7d88fe186 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e8ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e8ff.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf8x5_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -23,7 +23,7 @@ vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const in
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf4x5_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5)
[[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8m1x5_t 
test_vlseg5e8ff_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 
5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e16ff.c index 5e96a0d99a1239..51af97a8d2dee3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 
[[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e32ff.c index 4aaca57cdff92d..76e8661dfe5d9b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32mf2x6_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32m1x6_tumu // CHECK-RV64-SAME: 
( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vlseg6e32ff_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t mas // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e64ff.c index 3e711a6cbc32f4..8bc6d3f699425c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), 
i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } 
[[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e8ff.c index 572acf6f73f1a5..abf1d50c7ac8fa 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e16ff.c index b1962ca6136f92..472a9c342ec443 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } 
@llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], 
i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } 
@llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e32ff.c index 05eadd643bedf2..74397d53c1731a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x7_t 
test_vlseg7e32ff_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32m1x7_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32mf2x7_tum // CHECK-RV64-SAME: ( 
[[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vlseg7e32ff_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint32mf2x7_t 
test_vlseg7e32ff_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e64ff.c index a4071aca53b19c..5a08a7c94dc05b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } 
@llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e8ff.c index cf44ba1ced5f45..1aa853e9738bb4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } 
@llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } 
@llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e16ff.c index a8629007726bc9..24e50a9007538c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr 
noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } 
@llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } 
@llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16m1x8_tumu // CHECK-RV64-SAME: 
( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vlseg8e16ff_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maske // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_mu(vbool32_t mask, 
vuint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e32ff.c index 49550b8e08be59..1651976c224876 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 
@@ vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat32m1x8_t 
test_vlseg8e32ff_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } 
[[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } 
[[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e64ff.c index b92ca837fe5133..6d091682ca2b9a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } 
@llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e8ff.c index b360e4524e5dfc..2e0f7edd67d41e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } 
@llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 
[[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } 
@llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei16.c index bb8b45d7189fdd..c966c645d640da 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei16_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t 
test_vluxseg2ei16_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, 
size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const 
uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t 
test_vluxseg2ei16_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m1x2_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1650,7 +1650,7 @@ vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1660,7 +1660,7 @@ vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1670,7 +1670,7 @@ vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1680,7 +1680,7 @@ vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1690,7 +1690,7 @@ vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1700,7 +1700,7 @@ vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1710,7 +1710,7 @@ vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1720,7 +1720,7 @@ vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1730,7 +1730,7 @@ vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1740,7 +1740,7 @@ vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -1750,7 +1750,7 @@ vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1760,7 +1760,7 @@ vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1770,7 +1770,7 @@ vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1780,7 +1780,7 @@ vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1790,7 +1790,7 @@ vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1800,7 +1800,7 @@ vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1810,7 +1810,7 @@ vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1820,7 +1820,7 @@ vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1830,7 +1830,7 @@ vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1840,7 +1840,7 @@ vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1850,7 +1850,7 @@ vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1860,7 +1860,7 @@ vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1870,7 +1870,7 @@ vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1880,7 +1880,7 @@ vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1890,7 +1890,7 @@ vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1900,7 +1900,7 @@ vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1910,7 +1910,7 @@ vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1920,7 +1920,7 @@ vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei32.c index dd2930586dfeed..2f1de2e2bcd1f3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf4x2_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tu(vfloat64m1x2_t 
maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 
2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tu(vint16m1x2_t 
maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t 
vl) { @@ -310,7 +310,7 @@ vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tu(vuint32mf2x2_t 
maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1650,7 +1650,7 @@ vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1660,7 +1660,7 @@ vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1670,7 +1670,7 @@ vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -1680,7 +1680,7 @@ vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1690,7 +1690,7 @@ vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1700,7 +1700,7 @@ vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1710,7 +1710,7 @@ vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1720,7 +1720,7 @@ vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1730,7 +1730,7 @@ vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1740,7 +1740,7 @@ vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1750,7 +1750,7 @@ vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1760,7 +1760,7 @@ vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1770,7 +1770,7 @@ vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1780,7 +1780,7 @@ vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1790,7 +1790,7 @@ vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1800,7 +1800,7 @@ vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1810,7 +1810,7 @@ vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1820,7 +1820,7 @@ vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1830,7 +1830,7 @@ vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1840,7 +1840,7 @@ vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei64.c index 60421bdbe9c22e..6fc021d374d18c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tu(vfloat32m2x2_t 
maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) 
{ @@ -180,7 +180,7 @@ vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t 
vl) { @@ -280,7 +280,7 @@ vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, 
const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei8.c index f912905043c05b..efe58ac87a6a4b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ 
-100,7 +100,7 @@ vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 
@@ vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ 
-300,7 +300,7 @@ vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t 
vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t 
test_vluxseg2ei8_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, 
const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ 
vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t mas // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei8_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 
2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1650,7 +1650,7 @@ vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1660,7 +1660,7 @@ vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1670,7 +1670,7 @@ vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1680,7 +1680,7 @@ vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1690,7 +1690,7 @@ vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1700,7 +1700,7 @@ vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1710,7 +1710,7 @@ vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1720,7 +1720,7 @@ vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1730,7 +1730,7 @@ vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1740,7 +1740,7 @@ vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1750,7 +1750,7 @@ vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1760,7 +1760,7 @@ vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1770,7 +1770,7 @@ vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1780,7 +1780,7 @@ vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1790,7 +1790,7 @@ vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1800,7 +1800,7 @@ vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1810,7 +1810,7 @@ vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1820,7 +1820,7 @@ vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1830,7 +1830,7 @@ vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1840,7 +1840,7 @@ vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1850,7 +1850,7 @@ vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1860,7 +1860,7 @@ vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1870,7 +1870,7 @@ vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1880,7 +1880,7 @@ vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1890,7 +1890,7 @@ vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1900,7 +1900,7 @@ vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1910,7 +1910,7 @@ vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1920,7 +1920,7 @@ vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei16.c index 56c5054595c1c5..ffb5937ce2f8bc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei16_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t 
vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // 
vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tum(vbool8_t 
mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t 
bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t 
test_vluxseg3ei16_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei16_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei32.c index 0a76f7f967859b..de568014da05cd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t 
test_vluxseg3ei32_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf2x3_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // 
vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m1x3_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", 
, 3) @test_vluxseg3ei32_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei64.c index 3a74ce27599f6d..abce8ff9c67d14 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tu(vfloat32m2x3_t 
maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 
3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tu(vint16m2x3_t 
maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, 
size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t 
test_vluxseg3ei64_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, 
const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x3_t 
test_vluxseg3ei64_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedo // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei64_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei8.c index 1ab3fdbaf275dd..25da79e2d914d5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ 
-100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ 
-200,7 +200,7 @@ vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ 
-300,7 +300,7 @@ vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t 
test_vluxseg3ei8_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const 
double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t 
test_vluxseg3ei8_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m2x3_tum // CHECK-RV64-SAME: ( 
[[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", 
, 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei16.c index f875e26bac9465..0d83740e23b7d2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t 
test_vluxseg4ei16_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf8x4_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // 
vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m1x4_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei16_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t 
test_vluxseg4ei16_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t 
maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ 
-460,7 +460,7 @@ vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tum(vbool32_t mask, 
vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei16_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei32.c index 7da0eb0e988a21..065ecabfad61d9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, 
size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, 
size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const 
uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // 
vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t 
maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ 
vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t 
masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei32_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei64.c index 969022df85ac79..fff24d86fca6e9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, 
size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t 
vl) { @@ -200,7 +200,7 @@ vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t 
bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei8.c index b03f80b32bc1e4..8ab0ac946814bd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei8_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 
4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei16.c index 92e496ddcab8e3..b9fca136e0e836 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tu(vint8mf8x5_t 
maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, 
size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // 
vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tum(vbool64_t 
mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, 
size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tum(vbool8_t mask, 
vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf8x5_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei32.c index c7198bda16f31b..294cef4f0d7df9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tu(vint8mf4x5_t 
maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t 
test_vluxseg5ei32_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf2x5_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]]
 //
 vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
@@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maske
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u64m1x5_mu
 // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]]
 //
 vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei64.c
index 7f11665a1f84bf..cf86aea703d50d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei64.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf4x5_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]]
 //
 vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple,
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf2x5_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5)
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vluxseg5ei64_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tu(vuint8mf4x5_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // 
vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t 
maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ 
vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t 
mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vluxseg5ei64_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei8.c index c6f57b747e378b..919a2fa1b57988 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 
@@ vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ 
-200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t 
test_vluxseg5ei8_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t 
test_vluxseg5ei8_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf2x5_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", 
, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", 
, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t 
test_vluxseg5ei8_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei16.c index 53d5516cc8f056..2afaccc5db75d2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, 
vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const 
int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t 
test_vluxseg6ei16_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vluxseg6ei16_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei32.c index 753d83d7f702ac..49ba90c061bcc5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tu(vint8mf4x6_t 
maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t 
test_vluxseg6ei32_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf2x6_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei64.c index 7eee29cbb04642..b74f589814fe4b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vluxseg6ei64_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tu(vuint8mf4x6_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // 
vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t 
maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ 
vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t 
mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vluxseg6ei64_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei8.c index 6d4e481861e494..cc6fcf629aa4f9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 
@@ vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ 
-200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t 
test_vluxseg6ei8_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t 
test_vluxseg6ei8_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf2x6_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", 
, 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", 
, 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t 
test_vluxseg6ei8_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei16.c index 38fd9305e517e6..4c877c226313fe 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, 
vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const 
int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t 
test_vluxseg7ei16_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vluxseg7ei16_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei32.c index 3496075dba3ee1..dff7701a0a9cee 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tu(vint8mf4x7_t 
maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t 
test_vluxseg7ei32_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf2x7_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
 //
 vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
@@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maske
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u64m1x7_mu
 // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
 //
 vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei64.c
index e6ab589ffdb4ba..0165dde122e1a3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei64.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf4x7_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
 //
 vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple,
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf2x7_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
 //
 vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
@@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple,
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16m1x7_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
 //
 vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
@@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, co
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32mf2x7_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
 //
 vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) {
@@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple,
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32m1x7_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
 //
 vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) {
@@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, co
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f64m1x7_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
 //
 vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) {
@@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, co
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf8x7_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
 //
 vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
@@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf4x7_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
 //
 vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
@@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7)
@test_vluxseg7ei64_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tu(vuint8mf4x7_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // 
vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t 
maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ 
vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t 
mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vluxseg7ei64_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei8.c index 0234cc15780146..61bf0539266b9b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 
@@ vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ 
-200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t 
test_vluxseg7ei8_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t 
test_vluxseg7ei8_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf2x7_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", 
, 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", 
, 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t 
test_vluxseg7ei8_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei16.c index a2443b6036bcaf..21d0e62b82f34c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, 
vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const 
int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t 
test_vluxseg8ei16_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vluxseg8ei16_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei32.c index d2c764ad36a064..a3badb84fd9d91 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tu(vint8mf4x8_t 
maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t 
test_vluxseg8ei32_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf2x8_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]]
//
vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
@@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maske
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u64m1x8_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]]
//
vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei64.c
index af35e8ac5dd06b..a70242d4ab5e2d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei64.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf4x8_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]]
//
vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf2x8_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8)
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vluxseg8ei64_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tu(vuint8mf4x8_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // 
vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t 
maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ 
vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t 
mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vluxseg8ei64_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei8.c index c631245c3c10b5..43e8acce98d1b9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 
@@ vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ 
-200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t 
test_vluxseg8ei8_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t 
test_vluxseg8ei8_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf2x8_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", 
, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", 
, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t 
test_vluxseg8ei8_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp index de03aadefdd11c..b134d7cfbf7c5c 100644 --- a/clang/utils/TableGen/RISCVVEmitter.cpp +++ b/clang/utils/TableGen/RISCVVEmitter.cpp @@ -167,10 +167,37 @@ static VectorTypeModifier getTupleVTM(unsigned NF) { static_cast(VectorTypeModifier::Tuple2) + (NF - 2)); } + +static unsigned getIndexedLoadStorePtrIdx(const RVVIntrinsic *RVVI) { + // We need a special rule for segment load/store since the data width is not + // encoded in the instrinsic name itself. + const StringRef IRName = RVVI->getIRName(); + constexpr unsigned RVV_VTA = 0x1; + constexpr unsigned RVV_VMA = 0x2; + + if (IRName.starts_with("vloxseg") || IRName.starts_with("vluxseg")) { + bool NoPassthru = + (RVVI->isMasked() && (RVVI->getPolicyAttrsBits() & RVV_VTA) && + (RVVI->getPolicyAttrsBits() & RVV_VMA)) | + (!RVVI->isMasked() && (RVVI->getPolicyAttrsBits() & RVV_VTA)); + return RVVI->isMasked() ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1; + } + if (IRName.starts_with("vsoxseg") || IRName.starts_with("vsuxseg")) + return RVVI->isMasked() ? 
1 : 0; + + return (unsigned)-1; +} + // This function is used to get the log2SEW of each segment load/store, this // prevent to add a member to RVVIntrinsic. static unsigned getSegInstLog2SEW(StringRef InstName) { // clang-format off + // We need a special rule for indexed segment load/store since the data width + // is not encoded in the instrinsic name itself. + if (InstName.starts_with("vloxseg") || InstName.starts_with("vluxseg") || + InstName.starts_with("vsoxseg") || InstName.starts_with("vsuxseg")) + return (unsigned)-1; + #define KEY_VAL(KEY, VAL) {#KEY, VAL} #define KEY_VAL_ALL_W_POLICY(KEY, VAL) \ KEY_VAL(KEY, VAL), \ @@ -179,20 +206,20 @@ static unsigned getSegInstLog2SEW(StringRef InstName) { KEY_VAL(KEY ## _tumu, VAL), \ KEY_VAL(KEY ## _mu, VAL) -#define KEY_VAL_ALL_NF_BASE(MACRO_NAME, NAME, SEW, LOG2SEW, SUFFIX) \ - MACRO_NAME(NAME ## 2e ## SEW, LOG2SEW), \ - MACRO_NAME(NAME ## 3e ## SEW, LOG2SEW), \ - MACRO_NAME(NAME ## 4e ## SEW, LOG2SEW), \ - MACRO_NAME(NAME ## 5e ## SEW, LOG2SEW), \ - MACRO_NAME(NAME ## 6e ## SEW, LOG2SEW), \ - MACRO_NAME(NAME ## 7e ## SEW, LOG2SEW), \ - MACRO_NAME(NAME ## 8e ## SEW, LOG2SEW) +#define KEY_VAL_ALL_NF_BASE(MACRO_NAME, NAME, SEW, LOG2SEW, FF) \ + MACRO_NAME(NAME ## 2e ## SEW ## FF, LOG2SEW), \ + MACRO_NAME(NAME ## 3e ## SEW ## FF, LOG2SEW), \ + MACRO_NAME(NAME ## 4e ## SEW ## FF, LOG2SEW), \ + MACRO_NAME(NAME ## 5e ## SEW ## FF, LOG2SEW), \ + MACRO_NAME(NAME ## 6e ## SEW ## FF, LOG2SEW), \ + MACRO_NAME(NAME ## 7e ## SEW ## FF, LOG2SEW), \ + MACRO_NAME(NAME ## 8e ## SEW ## FF, LOG2SEW) #define KEY_VAL_ALL_NF(NAME, SEW, LOG2SEW) \ KEY_VAL_ALL_NF_BASE(KEY_VAL_ALL_W_POLICY, NAME, SEW, LOG2SEW,) #define KEY_VAL_FF_ALL_NF(NAME, SEW, LOG2SEW) \ - KEY_VAL_ALL_NF_BASE(KEY_VAL_ALL_W_POLICY, NAME, SEW, LOG2SEW, _FF) + KEY_VAL_ALL_NF_BASE(KEY_VAL_ALL_W_POLICY, NAME, SEW, LOG2SEW, ff) #define KEY_VAL_ALL_NF_SEW_BASE(MACRO_NAME, NAME) \ MACRO_NAME(NAME, 8, 3), \ @@ -208,11 +235,9 @@ static unsigned getSegInstLog2SEW(StringRef InstName) { // clang-format on static StringMap SegInsts = { - KEY_VAL_ALL_NF_SEW(vlseg), KEY_VAL_FF_ALL_NF_SEW(vlseg), - KEY_VAL_ALL_NF_SEW(vlsseg), KEY_VAL_ALL_NF_SEW(vloxseg), - KEY_VAL_ALL_NF_SEW(vluxseg), KEY_VAL_ALL_NF_SEW(vsseg), - KEY_VAL_ALL_NF_SEW(vssseg), KEY_VAL_ALL_NF_SEW(vsoxseg), - KEY_VAL_ALL_NF_SEW(vsuxseg)}; + KEY_VAL_ALL_NF_SEW(vlseg), KEY_VAL_FF_ALL_NF_SEW(vlseg), + KEY_VAL_ALL_NF_SEW(vlsseg), KEY_VAL_ALL_NF_SEW(vsseg), + KEY_VAL_ALL_NF_SEW(vssseg)}; #undef KEY_VAL_ALL_NF_SEW #undef KEY_VAL_ALL_NF @@ -231,6 +256,14 @@ void emitCodeGenSwitchBody(const RVVIntrinsic *RVVI, raw_ostream &OS) { if (RVVI->hasManualCodegen()) { OS << "IsMasked = " << (RVVI->isMasked() ? "true" : "false") << ";\n"; + + // Skip the non-indexed load/store and compatible header load/store. + OS << "if (SegInstSEW == (unsigned)-1) {\n"; + OS << " auto PointeeType = E->getArg(" << getIndexedLoadStorePtrIdx(RVVI) + << " )->getType()->getPointeeType();\n"; + OS << " SegInstSEW = " + " llvm::Log2_64(getContext().getTypeSize(PointeeType));\n}\n"; + OS << RVVI->getManualCodegen(); OS << "break;\n"; return; From 1ca0a8e5cd50f23faac7cf38a65fa0618100e0d8 Mon Sep 17 00:00:00 2001 From: Brandon Wu Date: Tue, 8 Oct 2024 08:31:37 -0700 Subject: [PATCH 2/2] fixup! 
[clang][RISCV] Correct the SEW operand of indexed/fault only first segment intrinsics --- clang/utils/TableGen/RISCVVEmitter.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp index b134d7cfbf7c5c..1b07bc446bf4e2 100644 --- a/clang/utils/TableGen/RISCVVEmitter.cpp +++ b/clang/utils/TableGen/RISCVVEmitter.cpp @@ -167,7 +167,6 @@ static VectorTypeModifier getTupleVTM(unsigned NF) { static_cast(VectorTypeModifier::Tuple2) + (NF - 2)); } - static unsigned getIndexedLoadStorePtrIdx(const RVVIntrinsic *RVVI) { // We need a special rule for segment load/store since the data width is not // encoded in the instrinsic name itself.
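
For reference, the trailing i64 operand corrected throughout the CHECK lines above is log2(SEW), derived at codegen time from the pointee type of the base pointer, matching the llvm::Log2_64(getContext().getTypeSize(PointeeType)) computation added to the emitter. The following is a minimal standalone sketch, not part of the patch; the helper name log2SEWFromElementWidth is illustrative only, and the expected values are taken from the updated test operands (8-bit -> 3, 16-bit -> 4, 32-bit -> 5, 64-bit -> 6).

// Sketch only: reproduces the element-width -> log2(SEW) mapping seen in the
// updated intrinsic calls; it is not code from the patch itself.
constexpr unsigned log2SEWFromElementWidth(unsigned WidthInBits) {
  unsigned Log2 = 0;
  while ((1u << Log2) < WidthInBits)
    ++Log2;
  return Log2;
}

// int8/uint8 elements -> i64 3, _Float16/int16 -> i64 4,
// float/int32 -> i64 5, double/int64 -> i64 6.
static_assert(log2SEWFromElementWidth(8) == 3, "8-bit segment elements");
static_assert(log2SEWFromElementWidth(16) == 4, "16-bit segment elements");
static_assert(log2SEWFromElementWidth(32) == 5, "32-bit segment elements");
static_assert(log2SEWFromElementWidth(64) == 6, "64-bit segment elements");

int main() { return 0; }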